import tensorflow as tf
import numpy as np
#from tensorflow.examples.tutorials.mnist import input_data
# Load the dataset (leftover from an MNIST tutorial, kept disabled)
#mnist = input_data.read_data_sets("MNIST_data",one_hot=True)
# Size of each batch
#batch_size = 100
# Total number of batches
#n_batch = mnist.train.num_examples // batch_size
# Define placeholders
# Training inputs: 22 samples, each a sliding window of 4 consecutive values
# from the same monotonically increasing series (row i+1 is row i shifted by
# one step) — classic windowed time-series setup for next-value prediction.
x_data=np.mat([[0.4984,0.5102,0.5213,0.5340],
[0.5102,0.5213,0.5340,0.5407],
[0.5213,0.5340,0.5407,0.5428],
[0.5340,0.5407,0.5428,0.5530],
[0.5407,0.5428,0.5530,0.5632],
[0.5428,0.5530,0.5632,0.5739],
[0.5530,0.5632,0.5739,0.5821],
[0.5632,0.5739,0.5821,0.5920],
[0.5739,0.5821,0.5920,0.5987],
[0.5821,0.5920,0.5987,0.6043],
[0.5920,0.5987,0.6043,0.6095],
[0.5987,0.6043,0.6095,0.6161],
[0.6043,0.6095,0.6161,0.6251],
[0.6095,0.6161,0.6251,0.6318],
[0.6161,0.6251,0.6318,0.6387],
[0.6251,0.6318,0.6387,0.6462],
[0.6318,0.6387,0.6462,0.6518],
[0.6387,0.6462,0.6518,0.6589],
[0.6462,0.6518,0.6589,0.6674],
[0.6518,0.6589,0.6674,0.6786],
[0.6589,0.6674,0.6786,0.6892],
[0.6674,0.6786,0.6892,0.6988]])
# Regression targets: for each 4-value window in x_data, the next value of
# the series (y_data[i] equals the element following x_data[i]'s window).
y_data=np.mat([[0.5407],
[0.5428],
[0.5530],
[0.5632],
[0.5739],
[0.5821],
[0.5920],
[0.5987],
[0.6043],
[0.6095],
[0.6161],
[0.6251],
[0.6318],
[0.6387],
[0.6462],
[0.6518],
[0.6589],
[0.6674],
[0.6786],
[0.6892],
[0.6988],
[0.7072]])
# Placeholders: each sample is a window of 4 consecutive series values; the
# target is the single next value. Batch size is left unknown (None) and the
# arrays are fed via feed_dict at run time.
xs = tf.placeholder(tf.float32, [None, 4])
ys = tf.placeholder(tf.float32, [None, 1])

# A simple two-layer network: 4 -> 10 (tanh) -> 1 (linear).
W1 = tf.Variable(tf.truncated_normal([4, 10], stddev=0.1))
b1 = tf.Variable(tf.zeros([10]) + 0.1)
# BUG FIX: was tf.matmul(x, W1) — `x` is undefined; the placeholder is `xs`.
L1 = tf.nn.tanh(tf.matmul(xs, W1) + b1)

W2 = tf.Variable(tf.truncated_normal([10, 1], stddev=0.1))
b2 = tf.Variable(tf.zeros([1]) + 0.1)
# BUG FIX: the original applied tf.nn.softmax to a single output unit, which
# always yields exactly 1.0 regardless of the weights, so the squared-error
# loss could never decrease. A linear output is the standard choice for
# scalar regression. Also named `prediction` so the run call below resolves.
prediction = tf.matmul(L1, W2) + b2

# Mean squared error over the batch. The inner reduce_sum over axis 1 is a
# no-op for a 1-wide output but kept for shape clarity.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]))

# Plain gradient descent, learning rate 0.1.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# BUG FIX: initialize_all_variables is deprecated (removal announced for
# 2017-03-02, per the warning this script printed); use the replacement.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    # The whole 22-sample dataset fits in one batch, so every iteration is a
    # full-batch gradient step; the nested loops just repeat it 21*22 times.
    for epoch in range(21):
        for i in range(22):
            sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    # BUG FIX: the original ran `prediction`, which was never defined (the
    # output tensor was named L2); it is now defined above.
    print(sess.run(prediction, feed_dict={xs: x_data, ys: y_data}))
Note: running this script produced the following deprecation warning:
WARNING:tensorflow:From <ipython-input-10-578836c021a3>:89 in <module>.: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use tf.global_variables_initializer instead.
InvalidArgumentError Traceback (most recent call last)
C:\Users\Administrator\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
1020 try:
-> 1021 return fn(*args)
1022 except errors.OpError as e:
C:\Users\Administrator\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
1002 feed_dict, fetch_list, target_list,
-> 1003 status, run_metadata)
1004
... (remainder of the traceback omitted)