Cần giúp đỡ về CNN trong tensorflow "tensorflow.python.framework.errors_impl.InvalidArgumentError: Input to reshape is a tensor with 19200 values, but the requested shape requires a multiple of 3700"



  • Em có doan code từ: https://github.com/dennybritz/cnn-text-classification-tf
    dược modified lại kết hợp 2 mạng:
    import tensorflow as tf
    import numpy as np

    class Text_CNN_RNN(object):
    """
    A neural network mixed (CNN, RNN) for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    Uses an embedding layer, GRUCell and output states of RNN
    """

    def __init__(self, sequence_length, num_classes, vocab_size, embedding_size,
                 filter_sizes, num_filters, hidden_unit, max_hidden_unit, l2_reg_lambda=0.0):
    
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        self.batch_size = tf.placeholder(tf.int32, name='batch_size')
        self.pad = tf.placeholder(tf.float32, [None, 1, embedding_size, 1], name='pad')
        self.real_len = tf.placeholder(tf.int32, [None], name='real_len')
    
        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)
        reduced = np.int32(np.ceil((sequence_length) * 1.0 / 4))
    
        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(W, self.input_x)
            self.embedded_chars_pad = tf.expand_dims(self.embedded_chars, -1)
            #emb = tf.expand_dims(self.embedded_chars, -1)
        '''
        # Zero paddings so that the convolution output have dimension batch x sequence_length x emb_size x channel
        num_prio = (filter_sizes[0] - 1) // 2
        num_post = (filter_sizes[0] - 1) - num_prio
        pad_prio = tf.concat(1, [self.pad] * num_prio)
        pad_post = tf.concat(1, [self.pad] * num_post)
        self.embedded_chars_pad = tf.concat(1, [pad_prio, emb, pad_post])
        '''
        # Convolution neural network (Create a convolution + maxpool layer for each filter size)
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_pad,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
    
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
    
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
    
                pooled_outputs.append(pooled)
    
        print('reduced:', num_classes)
        print('sequence_length:', sequence_length)
        print('filter_size:', filter_size)
        print('num_filters:', num_filters)
        print('embedding_size:', embedding_size)
        print('self.embedded_chars:', self.embedded_chars)
        print('self.embedded_chars_pad:', self.embedded_chars_pad)
    
        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(3, pooled_outputs)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        
        gru_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_unit)
        gru_cell = tf.nn.rnn_cell.DropoutWrapper(gru_cell, output_keep_prob=self.dropout_keep_prob)
        self._initial_state = gru_cell.zero_state(self.batch_size, tf.float32)
    
        inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, sequence_length, self.embedded_chars)]
        outputs, state = tf.nn.rnn(gru_cell, inputs, initial_state=self._initial_state, dtype=tf.float32,
                                      sequence_length=self.real_len)
    
        print('self.h_pool:', self.h_pool)
        print('self.h_pool_flat:', self.h_pool_flat)
        print('inputs:', inputs)
        print('outputs:', outputs)
    
        # Merge result of two neural CNN (max pooling) and RNN (outputs)
        pooled_output = tf.reshape(self.h_pool, [-1, sequence_length, num_filters])
        reshape_output = tf.stack(outputs, axis=1)
    
        merge_outputs = tf.concat( 2, [pooled_output, reshape_output])
        outputs_final = [tf.squeeze(input_, [1]) for input_ in tf.split(1, sequence_length, merge_outputs)]
    
        print('pooled_output: ', pooled_output)
        print('reshape_output:', reshape_output)
        print('merge_outputs:', merge_outputs)
        print('outputs final: ', outputs_final)
        
        # Collect the appropriate last words into variable output (dimension = batch x embedding_size)
        output = outputs_final[0]
        with tf.variable_scope('Output'):
            tf.get_variable_scope().reuse_variables()
            one = tf.ones([1, max_hidden_unit], tf.float32)
            for i in range(1, len(outputs_final)):
                ind = self.real_len < (i + 1)
                ind = tf.to_float(ind)
                ind = tf.expand_dims(ind, -1)
                mat = tf.matmul(ind, one)
                output = tf.add(tf.mul(output, mat), tf.mul(outputs_final[i], 1.0 - mat))
    
        with tf.name_scope('output'):
            W = tf.Variable(tf.truncated_normal([max_hidden_unit, num_classes], stddev=0.1), name='W')
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='b')
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(output, W, b, name='scores')
            self.predictions = tf.argmax(self.scores, 1, name='predictions')
    
        """
        # Code run only CNN:
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
    
        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable("W", shape=[num_filters_total, num_classes], initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")
        """
        # CalculateMean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(self.scores, self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
    
        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
    

    #Nhưng lại có error:
    /Library/Frameworks/Python.framework/Versions/3.6/bin/python3.6 /Users/annguyen/Documents/bitbucket/sa_twitter/train.py

    Parameters:
    ALLOW_SOFT_PLACEMENT=True
    BATCH_SIZE=64
    CHECKPOINT_EVERY=100
    DEV_SAMPLE_PERCENTAGE=0.1
    DROPOUT_KEEP_PROB=0.5
    EMBEDDING_DIM=300
    EVALUATE_EVERY=100
    FILTER_SIZES=3
    HIDDEN_UNIT=300
    L2_REG_LAMBDA=0.0
    LOG_DEVICE_PLACEMENT=False
    MAX_HIDDEN_UNIT=400
    MAX_POOL_SIZE=4
    NEGATIVE_DATA_FILE=./data/train-small/rt-polarity.neg
    NEUTRAL_DATA_FILE=./data/train-small/rt-polarity.neu
    NUM_CHECKPOINTS=5
    NUM_EPOCHS=200
    NUM_FILTERS=100
    POSITIVE_DATA_FILE=./data/train-small/rt-polarity.pos
    UNROLLED_LSTM=False

    Loading data...
    Vocabulary Size: 5315
    Train/Dev split: 1032/114
    reduced: 3
    sequence_length: 37
    filter_size: 3
    num_filters: 100
    embedding_size: 300
    self.embedded_chars: Tensor("embedding/embedding_lookup:0", shape=(?, 37, 300), dtype=float32, device=/device:CPU:0)
    self.embedded_chars_pad: Tensor("concat_2:0", shape=(?, 39, 300, 1), dtype=float32)
    self.h_pool: Tensor("concat_3:0", shape=(?, 3, 1, 100), dtype=float32)
    self.h_pool_flat: Tensor("Reshape:0", shape=(?, 100), dtype=float32)
    inputs: [<tf.Tensor 'Squeeze:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_1:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_2:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_3:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_4:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_5:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_6:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_7:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_8:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_9:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_10:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_11:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_12:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_13:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_14:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_15:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_16:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_17:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_18:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_19:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_20:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_21:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_22:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_23:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_24:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_25:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_26:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_27:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_28:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_29:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_30:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_31:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_32:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_33:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_34:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_35:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'Squeeze_36:0' shape=(?, 300) dtype=float32>]
    outputs: [<tf.Tensor 'RNN/cond/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_1/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_2/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_3/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_4/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_5/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_6/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_7/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_8/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_9/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_10/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_11/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_12/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_13/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_14/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_15/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_16/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_17/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_18/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_19/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_20/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_21/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_22/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_23/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_24/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_25/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_26/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_27/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_28/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_29/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_30/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_31/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_32/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_33/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_34/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_35/Merge:0' shape=(?, 300) dtype=float32>, <tf.Tensor 'RNN/cond_36/Merge:0' shape=(?, 300) dtype=float32>]
    pooled_output: Tensor("Reshape_1:0", shape=(?, 37, 100), dtype=float32)
    reshape_output: Tensor("stack:0", shape=(?, 37, 300), dtype=float32)
    merge_outputs: Tensor("concat_4:0", shape=(?, 37, 400), dtype=float32)
    outputs final: [<tf.Tensor 'Squeeze_37:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_38:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_39:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_40:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_41:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_42:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_43:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_44:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_45:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_46:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_47:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_48:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_49:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_50:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_51:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_52:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_53:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_54:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_55:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_56:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_57:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_58:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_59:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_60:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_61:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_62:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_63:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_64:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_65:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_66:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_67:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_68:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_69:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_70:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_71:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_72:0' shape=(?, 400) dtype=float32>, <tf.Tensor 'Squeeze_73:0' shape=(?, 400) dtype=float32>]
    output in read form output: Tensor("Output/Add_35:0", shape=(?, 400), dtype=float32)
    predictions in read form output: Tensor("output/predictions:0", shape=(?,), dtype=int64)
    scores in read form output: Tensor("output/scores:0", shape=(?, 3), dtype=float32)
    Writing to /Users/annguyen/Documents/bitbucket/sa_twitter/runs/1493801472

    Traceback (most recent call last):
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1021, in _do_call
    return fn(*args)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1003, in _run_fn
    status, run_metadata)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/contextlib.py", line 89, in exit
    next(self.gen)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 469, in raise_exception_on_not_ok_status
    pywrap_tensorflow.TF_GetCode(status))
    tensorflow.python.framework.errors_impl.InvalidArgumentError: Input to reshape is a tensor with 19200 values, but the requested shape requires a multiple of 3700
    [[Node: Reshape_1 = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _device="/job:localhost/replica:0/task:0/cpu:0"](concat_3, Reshape_1/shape)]]

    During handling of the above exception, another exception occurred:

    Traceback (most recent call last):
    File "/Users/annguyen/Documents/bitbucket/sa_twitter/train.py", line 204, in <module>
    training_data(folder_name, timestamp)
    File "/Users/annguyen/Documents/bitbucket/sa_twitter/train.py", line 190, in training_data
    train_step(x_batch, y_batch)
    File "/Users/annguyen/Documents/bitbucket/sa_twitter/train.py", line 159, in train_step
    [train_op, global_step, train_summary_op, cnn_rnn.loss, cnn_rnn.accuracy], feed_dict)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 766, in run
    run_metadata_ptr)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 964, in _run
    feed_dict_string, options, run_metadata)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1014, in _do_run
    target_list, options, run_metadata)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1034, in _do_call
    raise type(e)(node_def, op, message)
    tensorflow.python.framework.errors_impl.InvalidArgumentError: Input to reshape is a tensor with 19200 values, but the requested shape requires a multiple of 3700
    [[Node: Reshape_1 = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _device="/job:localhost/replica:0/task:0/cpu:0"](concat_3, Reshape_1/shape)]]

    Caused by op 'Reshape_1', defined at:
    File "/Users/annguyen/Documents/bitbucket/sa_twitter/train.py", line 204, in <module>
    training_data(folder_name, timestamp)
    File "/Users/annguyen/Documents/bitbucket/sa_twitter/train.py", line 93, in training_data
    l2_reg_lambda=FLAGS.l2_reg_lambda)
    File "/Users/annguyen/Documents/bitbucket/sa_twitter/text_cnn_rnn.py", line 98, in init
    pooled_output = tf.reshape(self.h_pool, [-1, sequence_length, num_filters])
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 2448, in reshape
    name=name)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 759, in apply_op
    op_def=op_def)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2240, in create_op
    original_op=self._default_original_op, op_def=op_def)
    File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1128, in init
    self._traceback = _extract_stack()

    InvalidArgumentError (see above for traceback): Input to reshape is a tensor with 19200 values, but the requested shape requires a multiple of 3700
    [[Node: Reshape_1 = Reshape[T=DT_FLOAT, Tshape=DT_INT32, _device="/job:localhost/replica:0/task:0/cpu:0"](concat_3, Reshape_1/shape)]]

    Process finished with exit code 1

    Mong mọi người có thể giúp em.



  • Dữ liệu của bạn có số chiều khác nhau, bạn có thể xóa hết các model và checkpoint đi và thử lại xem có được không, còn nếu vẫn không được thì phải nâng input dim lên


Hãy đăng nhập để trả lời
 

Có vẻ như bạn đã mất kết nối tới Cộng đồng Python Việt Nam, vui lòng đợi một lúc để chúng tôi thử kết nối lại.