#设损失函数 loss=(w+1)^2, 令w初值是常数5。反向传播就是求最优w,即求最小loss对应的w值
import tensorflow as tf
# Loss function: loss = (w + 1)^2, with w initialized to the constant 5.
# Gradient descent (backprop) drives w toward the minimizer of loss, w = -1.
w = tf.Variable(tf.constant(5, dtype=tf.float32))
loss = tf.square(w + 1)
# TF1 graph-mode training op: one step of SGD with learning rate 0.2.
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 40
    for i in range(STEPS):
        sess.run(train_step)
        w_val = sess.run(w)
        loss_val = sess.run(loss)
        # %f formats a float in fixed-point notation with exactly 6 decimal
        # places (rounding the value for display).
        print("After %s steps: w is %f, loss is %f." % (i, w_val, loss_val))  # print
        # An f-string with no format spec calls str() on the value, which for
        # a (numpy) float shows its full repr precision — hence the two lines
        # print the same numbers with different numbers of decimal digits.
        print(f"After {i} steps: w is {w_val}, loss is {loss_val}")
这段代码中的两条打印语句
print("After %s steps: w is %f, loss is %f." % (i, w_val, loss_val)) 和
print(f"After {i} steps: w is {w_val}, loss is {loss_val}") 有什么区别？为什么运行出来的结果不一样？（提示：%f 会把浮点数按定点格式四舍五入到固定 6 位小数，而 f-string 在没有格式说明符时调用 str()，显示浮点数的完整精度，所以两行输出的小数位数不同，数值本身是相同的。）