This is an example from *Machine Learning with TensorFlow* that trains in mini-batches, as follows:
for step in range(training_epochs * train_size // batch_size):
    offset = (step * batch_size) % train_size
    batch_xs = xs[offset:(offset + batch_size), :]
    batch_labels = labels[offset:(offset + batch_size)]
    err, _ = sess.run([cost, train_op], feed_dict={X: batch_xs, Y: batch_labels})
    if step % 100 == 0:
        print(step, err)
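For context, this is my own arithmetic rather than anything from the book: with train_size = 300 and batch_size = 100, the loop above performs 1000 * 300 // 100 = 3000 update steps, and the offset just cycles through the three 100-row slices of the shuffled data:

# hypothetical standalone check of how the offset cycles
train_size, batch_size = 300, 100
for step in range(5):
    print((step * batch_size) % train_size)  # 0, 100, 200, 0, 100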
The program runs correctly as written, but if I replace that loop with a full-batch version:
for epoch in range(training_epochs):
    err, _ = sess.run([cost, train_op], feed_dict={X: xs, Y: labels})
    if epoch % 100 == 0:
        print(epoch, err)
then logically this should be equivalent, yet every cost it prints is nan. What is going on?
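My guess so far (unverified): the cost is a hand-rolled cross-entropy, -tf.reduce_sum(Y * tf.log(y_model)). If training drives the softmax into saturation, one of its outputs underflows to exactly 0, tf.log(0) returns -inf, and multiplying by the zero entries of the one-hot label gives 0 * -inf = nan, which poisons the sum. A minimal NumPy sketch of that failure mode:

import numpy as np

p = np.array([1.0, 0.0, 0.0])  # fully saturated softmax output
y = np.array([0.0, 1.0, 0.0])  # one-hot label for the second class
with np.errstate(divide='ignore', invalid='ignore'):
    print(-np.sum(y * np.log(p)))  # nan: log(0) = -inf, and 0 * -inf = nan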
The complete program is below:
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
learning_rate = 0.01
training_epochs = 1000
num_labels = 3
batch_size = 100
# synthetic data: three 2-D Gaussian clusters, 100 points per class
x1_label0 = np.random.normal(1, 1, (100, 1))
x2_label0 = np.random.normal(1, 1, (100, 1))
x1_label1 = np.random.normal(5, 1, (100, 1))
x2_label1 = np.random.normal(4, 1, (100, 1))
x1_label2 = np.random.normal(8, 1, (100, 1))
x2_label2 = np.random.normal(0, 1, (100, 1))
xs_label0 = np.hstack((x1_label0, x2_label0))
xs_label1 = np.hstack((x1_label1, x2_label1))
xs_label2 = np.hstack((x1_label2, x2_label2))
xs = np.vstack((xs_label0, xs_label1, xs_label2))
# one-hot labels, then shuffle examples and labels with the same permutation
labels = np.matrix([[1., 0., 0.]] * len(x1_label0)
                   + [[0., 1., 0.]] * len(x1_label1)
                   + [[0., 0., 1.]] * len(x1_label2))
arr = np.arange(xs.shape[0])
np.random.shuffle(arr)
xs = xs[arr, :]
labels = labels[arr, :]
train_size, num_features = xs.shape
X = tf.placeholder("float", shape=[None, num_features])
Y = tf.placeholder("float", shape=[None, num_labels])
W = tf.Variable(tf.zeros([num_features, num_labels]))
b = tf.Variable(tf.zeros([num_labels]))
y_model = tf.nn.softmax(tf.matmul(X, W) + b)
# cross-entropy summed (not averaged) over however many rows are fed in
cost = -tf.reduce_sum(Y * tf.log(y_model))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
correct_prediction = tf.equal(tf.argmax(y_model, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    # for step in range(training_epochs * train_size // batch_size):
    #     offset = (step * batch_size) % train_size
    #     batch_xs = xs[offset:(offset + batch_size), :]
    #     batch_labels = labels[offset:(offset + batch_size)]
    #     err, _ = sess.run([cost, train_op], feed_dict={X: batch_xs, Y: batch_labels})
    #     if step % 100 == 0:
    #         print(step, err)
    for epoch in range(training_epochs):
        err, _ = sess.run([cost, train_op], feed_dict={X: xs, Y: labels})
        if epoch % 100 == 0:
            print(epoch, err)
    W_val = sess.run(W)
    print('w', W_val)
    b_val = sess.run(b)
    print('b', b_val)
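In case it helps: a common remedy I have seen (my assumption, not something the book does here) is to compute the cross-entropy from the raw logits with TensorFlow's fused, numerically stable op, and to average instead of sum so the gradient scale no longer grows with the number of rows fed. With reduce_sum, feeding all 300 rows makes each update three times larger than with batch_size = 100, which at learning_rate = 0.01 may be enough to diverge.

# sketch of a drop-in replacement for y_model/cost above (same X, W, b, Y)
logits = tf.matmul(X, W) + b
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=logits))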