请根据下面的旧代码,将其后的新代码补全。
旧的代码:
import pandas as pd
def parse_date(date_string):
    """Parse a date string whose fields are separated by underscores.

    Example: '2020_01_31' -> Timestamp('2020-01-31').
    """
    normalized = date_string.replace('_', '-')
    return pd.Timestamp(normalized)
# Load the price history; the 'Date' column becomes a DatetimeIndex via
# parse_date, which converts the file's underscore-separated dates.
# NOTE(review): hard-coded local path; `date_parser` is deprecated in
# pandas 2.x (use `date_format` or convert after loading) — confirm version.
df = pd.read_csv('D:/LSTMdata.csv', index_col='Date', parse_dates=True, date_parser=parse_date)
# Sort chronologically before any windowing / label shifting.
df.sort_index(inplace=True)
def Stock_Price_LSTM_Data_Precesing(df, mem_his_days, pre_days):
    """Build sliding-window training data for the LSTM price model.

    Parameters
    ----------
    df : pandas.DataFrame
        Price frame indexed by date; must contain a 'Close' column.
        Mutated in place (dropna, sort, adds a 'label' column).
    mem_his_days : int
        Look-back window length (time steps per sample).
    pre_days : int
        Forecast horizon: the label is Close shifted pre_days rows ahead.

    Returns
    -------
    X : np.ndarray, shape (n_samples, mem_his_days, n_features)
        Standardised feature windows.
    y : np.ndarray, shape (n_samples,)
        Future Close price aligned with each window.
    X_lately : list
        The most recent pre_days windows, which have no label yet
        (kept for out-of-sample prediction).
    """
    from collections import deque

    import numpy as np
    from sklearn.preprocessing import StandardScaler

    df.dropna(inplace=True)
    df.sort_index(inplace=True)
    # Label = Close price pre_days rows into the future.
    df['label'] = df['Close'].shift(-pre_days)

    # Standardise every feature column (all columns except the label).
    scaler = StandardScaler()
    sca_X = scaler.fit_transform(df.iloc[:, :-1])

    # BUG FIX: the original re-assigned `mem_his_days = 10` here, silently
    # overriding the caller's argument; the parameter is now honoured.
    deq = deque(maxlen=mem_his_days)
    X = []
    for row in sca_X:
        deq.append(list(row))
        if len(deq) == mem_his_days:
            X.append(list(deq))

    # The newest pre_days windows have NaN labels — set them aside.
    X_lately = X[-pre_days:]
    X = X[:-pre_days]
    # First full window ends at row mem_his_days-1; last pre_days labels are NaN.
    y = df['label'].values[mem_his_days - 1:-pre_days]

    return np.array(X), np.array(y), X_lately
# Quick sanity check of the pipeline: 5-day window, 10-day horizon.
X,y,X_lately = Stock_Price_LSTM_Data_Precesing(df,5,10)
print(len(X))         # number of labelled training windows
print(len(y))         # must equal len(X)
print(len(X_lately))  # unlabelled most-recent windows; equals pre_days (10)
# Forecast horizon shared by every grid-search run below.
pre_days = 10
# Hyper-parameter grid: look-back window lengths, extra-LSTM-layer counts,
# dense-layer counts and layer widths (3 * 3 * 3 * 2 = 54 combinations).
mem_days=[5,10,15]
lstm_layers=[1,2,3]
dense_layers=[1,2,3]
units = [16,32]
# Single-point grid kept for quick experiments:
# mem_days=[10]
# lstm_layers=[1]
# dense_layers=[1]
# units = [32]
from tensorflow.keras.callbacks import ModelCheckpoint
# ---------------------------------------------------------------------------
# Grid search: for each hyper-parameter combination train a fresh LSTM and
# checkpoint the epoch with the lowest validation MAPE. The filename encodes
# the metric, the epoch and the configuration so runs can be compared on disk.
# ---------------------------------------------------------------------------
# Imports hoisted out of the innermost loop — they only need to run once.
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout

for the_mem_days in mem_days:
    for the_lstm_layers in lstm_layers:
        for the_dense_layers in dense_layers:
            for the_units in units:
                # FIX: the original had a duplicated `filepath=filepath=` assignment.
                filepath = f"./theLSTMbestmodel1/{{val_mape:.2f}}{{epoch:02d}}men{the_mem_days}lstm{the_lstm_layers}dense{the_dense_layers}unit{the_units}.keras"
                checkpoint = ModelCheckpoint(
                    filepath=filepath,
                    save_weights_only=False,
                    monitor='val_mape',
                    mode='min',          # lower MAPE is better
                    save_best_only=True)

                X, y, X_lately = Stock_Price_LSTM_Data_Precesing(df, the_mem_days, pre_days)

                # shuffle=False keeps the chronological split: train on the
                # past, validate on the most recent 10% of windows.
                X_train, X_test, y_train, y_test = train_test_split(
                    X, y, shuffle=False, test_size=0.1)

                model = Sequential()
                model.add(LSTM(the_units, input_shape=X.shape[1:],
                               activation='relu', return_sequences=True))
                model.add(Dropout(0.1))
                # Additional sequence-returning LSTM layers.
                for _ in range(the_lstm_layers):
                    model.add(LSTM(the_units, activation='relu', return_sequences=True))
                    model.add(Dropout(0.1))
                # Final LSTM collapses the sequence to a single vector.
                model.add(LSTM(the_units, activation='relu'))
                model.add(Dropout(0.1))
                for _ in range(the_dense_layers):
                    model.add(Dense(the_units, activation='relu'))
                    model.add(Dropout(0.1))
                model.add(Dense(1))  # single regression output: future Close price

                model.compile(optimizer='adam',
                              loss='mse',
                              metrics=['mape'])
                model.fit(X_train, y_train, batch_size=32, epochs=50,
                          validation_data=(X_test, y_test), callbacks=[checkpoint])
补全后的新代码如下:
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Dropout, LSTM, LayerNormalization, TimeDistributed
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import MeanAbsolutePercentageError
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras import backend as K
def parse_date(date_string):
    """Return a pandas Timestamp, treating '_' as the date separator."""
    return pd.Timestamp('-'.join(date_string.split('_')))
def Stock_Price_WDCNNSeqData_Precesing(df, mem_his_days, pre_days,
                                       units=32, lstm_layers=1, dense_layers=1,
                                       kernel_size=3, num_blocks=2):
    """Build windowed training data AND a WD-CNN + attention + LSTM model.

    Data preparation is identical to Stock_Price_LSTM_Data_Precesing. On top
    of it a functional model is assembled: stacked dilated causal Conv1D
    blocks (WD-CNN), an LSTM stack over the convolved sequence, an attention
    pooling layer, and a dense regression head.

    Parameters
    ----------
    df : pandas.DataFrame
        Price frame indexed by date; must contain a 'Close' column.
        Mutated in place (dropna, sort, adds a 'label' column).
    mem_his_days : int
        Look-back window length (time steps per sample).
    pre_days : int
        Forecast horizon for the label.
    units, lstm_layers, dense_layers : int, optional
        Model hyper-parameters; the defaults keep the function callable with
        the original three positional arguments.
    kernel_size, num_blocks : int, optional
        WD-CNN configuration (convolution width and number of blocks).

    Returns
    -------
    (model, X, y, X_lately)
        Compiled Keras model plus the arrays produced by preprocessing.
    """
    from collections import deque

    import numpy as np
    from sklearn.preprocessing import StandardScaler

    # ---------- data preprocessing (unchanged from the LSTM version) ----------
    df.dropna(inplace=True)
    df.sort_index(inplace=True)
    df['label'] = df['Close'].shift(-pre_days)

    scaler = StandardScaler()
    sca_X = scaler.fit_transform(df.iloc[:, :-1])

    deq = deque(maxlen=mem_his_days)
    X = []
    for row in sca_X:
        deq.append(list(row))
        if len(deq) == mem_his_days:
            X.append(list(deq))

    X_lately = X[-pre_days:]
    X = X[:-pre_days]
    y = df['label'].values[mem_his_days - 1:-pre_days]

    # FIX: Conv1D consumes 3-D input (batch, steps, channels); the original
    # 4-D reshape would have broken the convolution, so X stays 3-D here.
    X = np.array(X)
    y = np.array(y)
    n_features = X.shape[-1]

    # ---------- model components ----------
    def dilated_causal_conv(x, rate):
        # Causal padding prevents any timestep from seeing the future.
        return tf.keras.layers.Conv1D(filters=units,
                                      kernel_size=kernel_size,
                                      padding='causal',
                                      dilation_rate=rate)(x)

    def wdcnn_block(x):
        # Each block: two dilated causal convolutions (rates 1 and 2), each
        # followed by layer normalisation and a ReLU.
        for _ in range(num_blocks):
            for rate in (1, 2):
                x = dilated_causal_conv(x, rate)
                x = LayerNormalization()(x)
                x = tf.nn.relu(x)
        return x

    def attention_pool(seq):
        # Score each timestep, softmax over the time axis, weighted sum.
        # FIX: the original expand_dims is dropped — `seq` is already 3-D.
        w = Dense(1)(seq)                       # (batch, steps, 1)
        w = tf.nn.softmax(w, axis=1)
        return tf.reduce_sum(seq * w, axis=1)   # (batch, units)

    # ---------- model assembly ----------
    input_seq = Input(shape=(mem_his_days, n_features))
    x = wdcnn_block(input_seq)
    # LSTM stack runs over the full sequence BEFORE attention pooling
    # (FIX: the original pooled first, leaving 2-D data the LSTM can't take).
    for _ in range(lstm_layers):
        x = LSTM(units, activation='relu', return_sequences=True)(x)
        x = Dropout(0.1)(x)   # FIX: original assigned the layer without calling it
    x = attention_pool(x)
    for _ in range(dense_layers):
        x = Dense(units, activation='relu')(x)
        x = Dropout(0.1)(x)   # FIX: same missing-call bug as above
    outputs = Dense(1)(x)

    model = Model(inputs=input_seq, outputs=outputs)
    # Same optimiser / loss / metric as the original training setup.
    model.compile(optimizer=Adam(), loss='mse', metrics=['mape'])
    return model, X, y, X_lately