lutongdrr 2017-07-31 06:44

Question about training TensorFlow on my own TFRecord dataset

import os
import tensorflow as tf
from PIL import Image
import matplotlib.pyplot as plt
import readfileTFRecord
import input_data_record
def weight_varible(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

#mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print("Loading Done!")

sess = tf.InteractiveSession()

# paras

W_conv1 = weight_varible([5, 5, 1, 32])
b_conv1 = bias_variable([32])

# conv layer-1

x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# conv layer-2

W_conv2 = weight_varible([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# full connection

W_fc1 = weight_varible([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout

keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# output layer: softmax

W_fc2 = weight_varible([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_ = tf.placeholder(tf.float32, [None, 10])

# model training

cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess.run(tf.initialize_all_variables())
img, label = readfileTFRecord.read_and_decode("train_min.tfrecords")

img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                batch_size=3, capacity=30,
                                                min_after_dequeue=9)
#img_batch,label_batch = input_data_record.get_batch(img,label,28,28,3,30)
init = tf.initialize_all_variables()

#with tf.Session() as sess:
sess.run(init)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess,coord=coord)
try:
    for i in range(30):
        if coord.should_stop():
            break
        val, l = sess.run([img_batch, label_batch])
        #l = to_categorical(l, 12)

        train_accuracy = accuracy.eval(feed_dict={x: val, y_: l, keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
        sess.graph.finalize()
        train_step.run(feed_dict={x: val, y_: l, keep_prob: 1.0})
        print(val.shape, l)

except tf.errors.OutOfRangeError:
    print('Done training --epoch limit reached')
finally:
    coord.request_stop()

coord.join(threads)
sess.close()

Error message:
ValueError: Cannot feed value of shape (3, 28, 28, 1) for Tensor u'Placeholder:0', which has shape '(?, 784)'


1 answer

  • Jangle_ 2017-08-01 08:16

Your placeholder is defined with shape 784, so reshape the batch to 784 before you feed it in.
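A minimal sketch of that fix inside the training loop, assuming val comes back from sess.run([img_batch, label_batch]) as a NumPy array of shape (3, 28, 28, 1), as the error message indicates:

# Flatten each (28, 28, 1) image into a 784-length vector so it matches
# the placeholder x = tf.placeholder(tf.float32, [None, 784]).
val, l = sess.run([img_batch, label_batch])
val = val.reshape(-1, 784)  # (3, 28, 28, 1) -> (3, 784)

train_accuracy = accuracy.eval(feed_dict={x: val, y_: l, keep_prob: 1.0})
train_step.run(feed_dict={x: val, y_: l, keep_prob: 1.0})

Note that y_ is declared as [None, 10], so if label_batch yields integer class ids rather than one-hot vectors, they would also need converting (the commented-out to_categorical line in the question hints at this).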

