superhjr 2018-04-16 03:11 采纳率: 33.3%
浏览 2528
已结题

Tensorflow用自己的图片做数据集做识别,无法feed数据,跪求大神帮助!

使用tensorflow识别我自己的tfrecord文件时,在训练时无法feed数据,错误是placeholder那里,下面给出错误和我的代码,跪求大神帮助!!!
错误:

 Traceback (most recent call last):
  File "/Users/hanjiarong/PycharmProjects/sample5/main.py", line 206, in <module>
    session.run(opti, feed_dict={x: session.run(batch_image), y: session.run(batch_label), keep_drop: dropout})
  File "/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 905, in run
    run_metadata_ptr)
  File "/anaconda3/envs/tensorflow/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1113, in _run
    str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (1, 227, 227, 3) for Tensor 'Placeholder:0', which has shape '(154587, ?)'

下面是我的代码:

 import tensorflow as tf
from encode_to_tfrecords import create_record, create_test_record,  read_and_decode, get_batch, get_test_batch

n_input = 154587
n_classes = 3
dropout = 0.5
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.int32, [None, n_classes])
keep_drop = tf.placeholder(tf.float32)

class network(object):
    """AlexNet-style convolutional network for 227x227x3 RGB images.

    Weights and biases are created once in __init__ under the variable
    scopes "weights" and "biases"; inference() builds the forward pass,
    sorfmax_loss() the loss, and optimer() the training op.
    """

    def inference(self, images, keep_drop):
        """Build the forward pass.

        Args:
            images: tensor reshapeable to [batch, 227, 227, 3] (any dtype;
                cast to float32 inside).
            keep_drop: scalar keep probability for dropout on the first
                fully-connected layer.

        Returns:
            Logits tensor of shape [batch, 1000] (un-normalized scores).
        """
        ####################################################################################################################
        # Reshape flat vectors (if any) into image batches:
        # [batch, in_height, in_width, in_channels].
        images = tf.reshape(images, shape=[-1, 227, 227, 3])

        # Normalize pixel values from [0, 255] to [-1, 1].
        images = (tf.cast(images, tf.float32) / 255. - 0.5) * 2
        ####################################################################################################################

        # Layer 1: conv (11x11, stride 4) + bias + relu + 3x3/2 max-pool.
        conv1 = tf.nn.bias_add(tf.nn.conv2d(images, self.weights['conv1'], strides=[1, 4, 4, 1], padding='VALID'),
                               self.biases['conv1'])
        relu1 = tf.nn.relu(conv1)
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')

        # Layer 2: conv (5x5, stride 1, SAME) + bias + relu + 3x3/2 max-pool.
        conv2 = tf.nn.bias_add(tf.nn.conv2d(pool1, self.weights['conv2'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv2'])
        relu2 = tf.nn.relu(conv2)
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')

        # Layer 3: conv (3x3) + relu, no pooling.
        conv3 = tf.nn.bias_add(tf.nn.conv2d(pool2, self.weights['conv3'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv3'])
        relu3 = tf.nn.relu(conv3)

        # Layer 4: conv (3x3) + relu.
        conv4 = tf.nn.bias_add(tf.nn.conv2d(relu3, self.weights['conv4'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv4'])
        relu4 = tf.nn.relu(conv4)

        # Layer 5: conv (3x3) + relu + 3x3/2 max-pool.
        conv5 = tf.nn.bias_add(tf.nn.conv2d(relu4, self.weights['conv5'], strides=[1, 1, 1, 1], padding='SAME'),
                               self.biases['conv5'])
        relu5 = tf.nn.relu(conv5)
        pool5 = tf.nn.max_pool(relu5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')

        # Fully-connected layer 1: flatten the feature maps to vectors first.
        flatten = tf.reshape(pool5, [-1, self.weights['fc1'].get_shape().as_list()[0]])

        # Dropout before fc1 (keep probability supplied by the caller).
        drop1 = tf.nn.dropout(flatten, keep_drop)

        fc1 = tf.matmul(drop1, self.weights['fc1']) + self.biases['fc1']
        fc_relu1 = tf.nn.relu(fc1)

        fc2 = tf.matmul(fc_relu1, self.weights['fc2']) + self.biases['fc2']
        fc_relu2 = tf.nn.relu(fc2)

        # Final linear layer — returns raw logits (no softmax here; the loss
        # applies softmax internally).
        fc3 = tf.matmul(fc_relu2, self.weights['fc3']) + self.biases['fc3']

        return fc3

    def __init__(self):
        # Create all weights and biases once; variable names are kept stable
        # so existing checkpoints keep loading.
        with tf.variable_scope("weights"):
            self.weights = {
                # conv1: 11x11x3 -> 96 feature maps
                'conv1': tf.get_variable('conv1', [11, 11, 3, 96],
                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # conv2: 5x5x96 -> 256
                'conv2': tf.get_variable('conv2', [5, 5, 96, 256],
                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # conv3: 3x3x256 -> 384
                'conv3': tf.get_variable('conv3', [3, 3, 256, 384],
                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # conv4: 3x3x384 -> 384
                'conv4': tf.get_variable('conv4', [3, 3, 384, 384],
                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # conv5: 3x3x384 -> 256
                'conv5': tf.get_variable('conv5', [3, 3, 384, 256],
                                         initializer=tf.contrib.layers.xavier_initializer_conv2d()),
                # fc layers: 6*6*256 -> 4096 -> 4096 -> 1000 output logits
                'fc1': tf.get_variable('fc1', [6 * 6 * 256, 4096], initializer=tf.contrib.layers.xavier_initializer()),
                'fc2': tf.get_variable('fc2', [4096, 4096], initializer=tf.contrib.layers.xavier_initializer()),
                'fc3': tf.get_variable('fc3', [4096, 1000], initializer=tf.contrib.layers.xavier_initializer()),
            }

        with tf.variable_scope("biases"):
            self.biases = {
                'conv1': tf.get_variable('conv1', [96, ],
                                         initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'conv2': tf.get_variable('conv2', [256, ],
                                         initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'conv3': tf.get_variable('conv3', [384, ],
                                         initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'conv4': tf.get_variable('conv4', [384, ],
                                         initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'conv5': tf.get_variable('conv5', [256, ],
                                         initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'fc1': tf.get_variable('fc1', [4096, ],
                                       initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'fc2': tf.get_variable('fc2', [4096, ],
                                       initializer=tf.constant_initializer(value=0.1, dtype=tf.float32)),
                'fc3': tf.get_variable('fc3', [1000, ], initializer=tf.constant_initializer(value=0.1, dtype=tf.float32))
            }

    def sorfmax_loss(self, predicts, labels):
        """Return the mean softmax cross-entropy loss.

        BUG FIX: the original ran tf.nn.softmax(predicts) BEFORE calling
        tf.nn.softmax_cross_entropy_with_logits, which applies softmax a
        second time internally — producing wrong (vanishing) gradients.
        Pass the raw logits straight through instead.  Also reduce to a
        scalar so GradientDescentOptimizer.minimize() gets a scalar loss.

        Args:
            predicts: raw logits from inference(), shape [batch, 1000].
            labels: integer class ids, one-hot-encoded here against the
                fc3 output width.  NOTE(review): the caller currently feeds
                the 2-D placeholder y into this — confirm labels really are
                integer ids, or tf.one_hot will produce a rank-3 tensor.
        """
        labels = tf.one_hot(labels, self.weights['fc3'].get_shape().as_list()[1])

        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=predicts, labels=labels))

        self.cost = loss

        return self.cost

    def optimer(self, loss, lr=0.01):
        """Return a gradient-descent training op minimizing `loss` at rate `lr`."""
        train_optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss)

        return train_optimizer

        #定义训练
    # def train(self):
# Write the training images into a TFRecord file.
create_record('/Users/hanjiarong/Documents/testdata/tfrtrain')

# Build the network and the training graph.
net = network()
# BUG FIX: the original passed the Python float `dropout` (0.5) here, so the
# keep_drop placeholder was never wired into the graph and every
# `keep_drop: ...` entry in the feed_dicts below was silently ignored
# (dropout stayed on during evaluation).  Pass the placeholder instead.
inf = net.inference(x, keep_drop)
loss = net.sorfmax_loss(inf, y)
opti = net.optimer(loss)

# Accuracy: fraction of examples where the arg-max logit matches the label.
correct_pred = tf.equal(tf.argmax(inf, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Write the test images into their TFRecord file (its reader is built later).
create_test_record('/Users/hanjiarong/Documents/testdata/tfrtest')

# Training input queue: batches of one (image, label) pair.
image, label = read_and_decode('train.tfrecords')
batch_image, batch_label = get_batch(image, label, 1)

with tf.Session() as session:
    session.run(tf.initialize_all_variables())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    # BUG FIX: build the test-input pipeline ONCE, before the loop.  The
    # original called read_and_decode()/get_batch() on every iteration,
    # adding new ops to the graph each time (unbounded graph growth).
    test_image, test_label = read_and_decode('test.tfrecords')
    batch_test_image, batch_test_label = get_batch(test_image, test_label, 2)

    max_iter = 100000
    step = 1  # renamed from `iter` — don't shadow the builtin
    print("begin1")
    while step * 30 < max_iter:
        # BUG FIX: fetch image and label in ONE session.run().  Two separate
        # session.run(batch_image) / session.run(batch_label) calls dequeue
        # from the queue twice, pairing each image with the label of a
        # DIFFERENT example.
        img_np, lab_np = session.run([batch_image, batch_label])
        session.run(opti, feed_dict={x: img_np, y: lab_np, keep_drop: dropout})
        print("begin6")

        if step % 10 == 0:
            # BUG FIX: don't rebind `loss` (the graph tensor) to its float
            # result — the next logging iteration would try to session.run a
            # Python float.  Also feed the evaluated numpy arrays, not the
            # batch tensors themselves.
            loss_val, acc_val = session.run([loss, accuracy],
                                            feed_dict={x: img_np, y: lab_np, keep_drop: 1.})
            print("Iter " + str(step * 30) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss_val) + ", Training Accuracy= " + "{:.5f}".format(acc_val))
        step += 1

        # Evaluate on a small test batch (dropout disabled via keep_drop=1).
        img_test, lab_test = session.run([batch_test_image, batch_test_label])
        test_accuracy = session.run(accuracy,
                                    feed_dict={x: img_test, y: lab_test, keep_drop: 1.})
        print("Testing Accuracy:", test_accuracy)

    # BUG FIX: this printed every iteration in the original; it belongs
    # after the loop.  Also shut the queue-runner threads down cleanly.
    print("Optimization Finished!")
    coord.request_stop()
    coord.join(threads)
















  • 写回答

1条回答 默认 最新

  • xiaoxianshen595 2018-04-16 03:41
    关注
    评论

报告相同问题?

悬赏问题

  • ¥15 Matlab问题解答有两个问题
  • ¥50 Oracle Kubernetes服务器集群主节点无法访问,工作节点可以访问
  • ¥15 LCD12864中文显示
  • ¥15 在使用CH341SER.EXE时不小心把所有驱动文件删除了怎么解决
  • ¥15 gsoap生成onvif框架
  • ¥15 有关sql server business intellige安装,包括SSDT、SSMS。
  • ¥15 stm32的can接口不能收发数据
  • ¥15 目标检测算法移植到arm开发板
  • ¥15 利用JD51设计温度报警系统
  • ¥15 快手联盟怎么快速的跑出建立模型