Tensorflow建一个神经网络，输出数据只有一个谱型，且杂乱

``````import tensorflow as tf
import numpy as np
# Layer helper: one fully connected (dense) layer.
def add_layer(inputs, in_size, out_size, n_layer=None, activation_function=None):
    """Build one dense layer and return its output tensor.

    Args:
        inputs: 2-D tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        n_layer: layer label; accepted because the call sites pass it,
            but only informational.
        activation_function: optional nonlinearity applied to the
            pre-activation; None means a linear layer.

    NOTE(review): the original paste lost the `def` line and all body
    indentation — both are restored here.  A bias term is also added:
    the original multiplied by weights only, which prevents the layer
    from shifting its output and is a likely contributor to the poor fit
    described in the question.
    """
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Small positive bias so sigmoid units start in their active region.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs
# 1. Training data: min-max scale to [0, 1], then transpose so each ROW is
#    one sample (3 input features, 250 output points per sample).
# NOTE(review): p_1, s_1, p_2, s_2 are assumed to be loaded earlier in the
# file (not shown here) — confirm their shapes match the reshapes below.
p = np.reshape(p_1, (3, 100000))
s = np.reshape(s_1, (250, 100000))
pmin, pmax = p.min(), p.max()
p_train = (p - pmin) / (pmax - pmin)
smin, smax = s.min(), s.max()
s_train = (s - smin) / (smax - smin)
# Samples as rows; plain Python lists are what feed_dict receives later.
p_train = np.transpose(p_train).tolist()
s_train = np.transpose(s_train).tolist()

# 2. Test data, prepared the same way.
# NOTE(review): the test set is scaled with its OWN min/max rather than the
# training statistics; for a fair evaluation the training pmin/pmax and
# smin/smax are normally reused — verify whether this is intended.
# Behavior is kept exactly as in the original.
p2 = np.reshape(p_2, (3, 5501))
s2 = np.reshape(s_2, (250, 5501))
pmin2, pmax2 = p2.min(), p2.max()
p_test = (p2 - pmin2) / (pmax2 - pmin2)
smin2, smax2 = s2.min(), s2.max()
s_test = (s2 - smin2) / (smax2 - smin2)
p_test = np.transpose(p_test).tolist()
s_test = np.transpose(s_test).tolist()

# 3. Placeholders: px = network input (3 features per sample),
#    sx = target spectrum (250 points per sample).
# NOTE(review): `sy` is declared but never used in the visible code; it is
# kept unchanged in case code outside this excerpt feeds it.
px = tf.placeholder(tf.float32, [None, 3])
sx = tf.placeholder(tf.float32, [None, 250])
sy = tf.placeholder(tf.float32, [None, 250])

# 4. Network: one sigmoid hidden layer (3 -> 200) and a linear output
#    layer (200 -> 250).
l1 = add_layer(px, 3, 200, n_layer=1, activation_function=tf.nn.sigmoid)
# BUG FIX: the original passed an undefined name `l2` here (NameError);
# the hidden layer built above is `l1`.
prediction = add_layer(l1, 200, 250, n_layer=3, activation_function=None)

# 5. Loss: mean squared error between target and prediction.
loss = tf.reduce_mean(tf.square(sx - prediction))

# 6. Optimizer.  BUG FIX: `train_step` was used in the loop below but never
#    defined (this section was empty); plain gradient descent with a 0.1
#    learning rate is the conventional choice for this network.
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# 7. Variable initialisation.  `tf.global_variables_initializer` replaces
#    the deprecated `tf.initialize_all_variables`.
init = tf.global_variables_initializer()

# 8./9. Create the session and initialise all variables.
sess = tf.Session()
sess.run(init)

# 10. Train, printing the training loss every 50 steps.
# NOTE(review): 1000 full-batch steps may be too few for 100000 samples —
# the accepted answer reports that raising the iteration count several-fold
# improved the result substantially.
for step in range(1000):
    sess.run(train_step, feed_dict={px: p_train, sx: s_train})
    if step % 50 == 0:
        print(sess.run(loss, feed_dict={px: p_train, sx: s_train}))

# Predict on the (independently normalised) test inputs.
prediction_test = sess.run(prediction, feed_dict={px: p_test})
``````

2个回答

weixin_38960656 超级感谢，扩大了几倍训练次数，结果好了很多！