I'm training an LSTM in Python. The loss is already very small, but the fitted values are far from the true values and the prediction comes out as a flat line. How should I tune this?
Below are the code and parameters. I've already tuned early stopping, regularization, the learning rate, and the number of epochs. What should I change to improve the predictions?
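A quick sanity check worth running first: when the loss is tiny but the prediction is a flat line, the model has often just collapsed to predicting (roughly) the mean of a narrowly scaled target. A minimal sketch for checking this, assuming model, train_y, test_x, and test_y are the objects from the snippet below (the scaler in the last comment is hypothetical):

import numpy as np

# Compare the trained model against a naive constant-mean predictor.
# If the two MSEs are close, the small loss reflects target scaling, not real skill.
pred = model.predict(test_x).ravel()
baseline = np.full_like(test_y.ravel(), train_y.mean())
model_mse = np.mean((pred - test_y.ravel()) ** 2)
naive_mse = np.mean((baseline - test_y.ravel()) ** 2)
print("model MSE {:.6f} vs constant-mean MSE {:.6f}".format(model_mse, naive_mse))
# If the targets were normalized, inverse-transform before judging the fit, e.g.
# scaler.inverse_transform(pred.reshape(-1, 1))  # 'scaler' is a hypothetical fitted scaler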
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Bidirectional, BatchNormalization, Dropout, Dense
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler
import matplotlib.pyplot as plt

def buildmylstm(initactivation="relu", ininlr=0.001):
    model = Sequential()
    nb_time_steps = train_x.shape[1]
    nb_input_vector = train_x.shape[2]
    # Add a bidirectional LSTM layer
    model.add(Bidirectional(LSTM(units=64, input_shape=(nb_time_steps, nb_input_vector),
                                 kernel_regularizer=keras.regularizers.l2(0.05),
                                 activation=initactivation)))
    # Add a batch-normalization layer
    model.add(BatchNormalization())
    model.add(Dropout(0.01))
    # Add a fully connected output layer
    model.add(Dense(units=1))
    # Compile the model
    lr = ininlr
    adam = optimizers.Adam(learning_rate=lr)
    def scheduler(epoch):
        # Learning-rate schedule: every 80 epochs, shrink the learning rate to 1/10
        if epoch % 80 == 0 and epoch != 0:
            lr = K.get_value(model.optimizer.lr)
            K.set_value(model.optimizer.lr, lr * 0.1)
            print("lr changed to {}".format(lr * 0.1))
        return K.get_value(model.optimizer.lr)
    model.compile(loss="mse", optimizer=adam, metrics=["mse"], run_eagerly=True)
    # Loss choice follows the task: regression generally uses a distance-based loss
    # such as MSE; classification generally uses cross-entropy
    # Early-stopping callback
    early_stopping = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
    reduce_lr = LearningRateScheduler(scheduler)
    batchsize = int(len(all_data) / 10)
    epochs = max(80, batchsize * 3)
    # The callbacks and batch size must be passed to fit(), otherwise they are never used
    history = model.fit(train_x, train_y, epochs=epochs, batch_size=batchsize,
                        validation_data=(test_x, test_y), shuffle=False,
                        callbacks=[early_stopping, reduce_lr])
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model train vs validation loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper right')
    plt.show()
    # Note: nothing special is needed to "remove dropout at test time".
    # Keras Dropout is only active during training, and model.predict() already
    # runs with it disabled. Setting layer.trainable = False would only freeze
    # the layer for further training; it would not change inference behavior.
    return model
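For completeness, a minimal usage sketch, assuming train_x, train_y, test_x, test_y, and all_data exist as in the snippet above. Trying tanh instead of relu is just one illustrative tweak: tanh is the conventional LSTM activation and tends to be better behaved here.

model = buildmylstm(initactivation="tanh", ininlr=0.001)  # fit() runs inside buildmylstm
pred = model.predict(test_x)
print(pred[:10].ravel())  # if every value is nearly identical, the model collapsed to a constant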

