IULee520 2020-12-07 21:36
64 views

During MNIST handwritten-digit recognition, pred_y comes back as all zeros. The full code is below; a possible cause is sketched after the listing.

import numpy as np
import tensorflow as tf  # written against the TF 1.x graph API; on TF 2.x this needs tensorflow.compat.v1 with v2 behavior disabled
from sklearn.preprocessing import OneHotEncoder

# Load the MNIST dataset
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# Define a one-hot encoding helper around sklearn's OneHotEncoder
def onehot(y, start, end, categories='auto'):
    oht = OneHotEncoder()
    a = np.linspace(start, end - 1, end - start)  # the category values start .. end-1
    b = np.reshape(a, [-1, 1]).astype(np.int32)   # column vector expected by fit()
    oht.fit(b)
    c = oht.transform(y).toarray()                # y must already have shape [N, 1]
    return c
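
# Quick sanity check of the helper above (illustrative call, not from the original post):
#   onehot(np.array([[3]]), 0, 10)
#   -> [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]   (a 1 at index 3)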

# One-hot encode the labels and flatten the images to 784-dimensional vectors
y_train = np.reshape(y_train, [-1, 1]).astype(np.int32)
y_train = onehot(y_train, 0, 10)
y_test = np.reshape(y_test, [-1, 1]).astype(np.int32)
y_test = onehot(y_test, 0, 10)
X_train = np.reshape(X_train, [-1, 784]).astype(np.float32)  # pixel values are left in [0, 255]
X_test = np.reshape(X_test, [-1, 784]).astype(np.float32)

x = tf.placeholder(tf.float32, [None, 784])  # image data, N rows x 784 columns
y = tf.placeholder(tf.float32, [None, 10])  # labels (true image classes), N rows x 10 columns
w = tf.Variable(tf.random_normal([784, 10]))  # weights
b = tf.Variable(tf.zeros([10]))  # bias, one row of 10

pred_y = tf.nn.softmax(tf.matmul(x, w) + b)
print(pred_y.shape)
# Loss function (cross-entropy)
cross_entropy = -tf.reduce_sum(y * tf.log(pred_y), reduction_indices=1)
cost = tf.reduce_mean(cross_entropy)
# Gradient-descent optimizer
optimizer = tf.train.GradientDescentOptimizer(0.001).minimize(cost)
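
# A numerically safer alternative, shown only as comments so the question's code stays as
# posted (a suggestion, not what the original script used): build the loss from the raw
# logits with tf.nn.softmax_cross_entropy_with_logits_v2, which avoids tf.log(0) when the
# softmax saturates.
#   logits = tf.matmul(x, w) + b
#   cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))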

# Batch size
batch_size = 100
saver = tf.train.Saver()
model_path = './model/'  # model path

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Start training
    for epoch in range(10):
        total_batch = int(X_train.shape[0] / batch_size)  # number of batches per epoch
        avg_cost = 0.0

        for i in range(total_batch):
            batch_x = X_train[i * batch_size:(i + 1) * batch_size, :]
            # batch_x = np.reshape(batch_x, [-1, 784])
            batch_y = y_train[i * batch_size:(i + 1) * batch_size, :]
            op, c = sess.run([optimizer, cost],
                             feed_dict={x: batch_x, y: batch_y})

            avg_cost += (c / total_batch)
        print("epoch:%d, cost=%.9f" % (epoch + 1, avg_cost))
    print('Training finished')

    # Model evaluation
    # Compare predictions with the true labels; this returns a boolean array
    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(pred_y, 1))
    # Cast the boolean array to float and compute the mean accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    print('Accuracy:', accuracy.eval({x: X_test, y: y_test}))
    print(sess.run(tf.argmax(y_test[:30], 1)), "Real Number")
    print(sess.run(tf.argmax(pred_y[:30], 1), feed_dict={x: X_test, y: y_test}), "Prediction Number")
    # Save the model
    save_path = saver.save(sess, model_path)
    print('Model saved')
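
A likely cause, offered as an assumption from the symptoms rather than anything stated in the post: the raw 0-255 pixel values push the softmax into saturation, tf.log(pred_y) then produces -inf, the first gradient steps turn w into NaN, and tf.argmax over an all-NaN row falls back to index 0 for every sample, which is why the printed predictions are all 0. Below is a minimal sketch of the usual fix, reusing the placeholders and variables defined in the code above: scale the inputs to [0, 1] and drive the loss from the raw logits. The learning rate of 0.01 is also an assumption, not the original value.

X_train = X_train / 255.0  # scale the flattened images to [0, 1]
X_test = X_test / 255.0

logits = tf.matmul(x, w) + b    # raw scores, before softmax
pred_y = tf.nn.softmax(logits)  # probabilities, used only for argmax/accuracy
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logits))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

With these two changes the same training loop should produce a finite, decreasing cost and non-constant predictions.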