The model code is as follows:
import torch
import torch.nn as nn

class Net(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, batch_size, seq_length) -> None:
        super(Net, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.output_size = output_size
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.num_directions = 1  # unidirectional LSTM
        self.liner1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.liner2 = nn.Linear(num_layers * hidden_size, output_size)
        self.dropout = nn.Dropout(0.2)
        self.lstm = nn.LSTM(input_size=hidden_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True, dropout=0.2)  # LSTM layer
        self.fc = nn.Linear(hidden_size, output_size)  # fully connected layer (not used in forward)

    def forward(self, x):
        batchsize = x.shape[0]
        x = self.liner1(x)
        x = self.relu(x)
        h_0 = torch.randn(self.num_directions * self.num_layers, x.size(0), self.hidden_size).to('cuda')
        c_0 = torch.randn(self.num_directions * self.num_layers, x.size(0), self.hidden_size).to('cuda')
        output, (h_n, c_n) = self.lstm(x, (h_0, c_0))         # output: (batch, seq_length, hidden_size)
        output = h_n.permute(1, 0, 2).reshape(batchsize, -1)  # (num_layers, batch, hidden) -> (batch, num_layers*hidden)
        pred = self.dropout(output)
        pred = self.liner2(pred)
        pred = pred[:, -1]  # squeeze the last dimension, since the reshape above already flattened the hidden states
        return pred
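For reference, here is a quick shape check of the forward pass (a minimal sketch with random dummy data; the sizes mirror the config below, and a CUDA device is assumed because h_0/c_0 are hard-coded to 'cuda'):

    net = Net(input_size=3, hidden_size=128, num_layers=2,
              output_size=1, batch_size=64, seq_length=10).to('cuda')
    dummy = torch.randn(64, 10, 3, device='cuda')  # (batch, seq_length, input_size)
    print(net(dummy).shape)                        # torch.Size([64]) -- one prediction per sequence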
The configuration code is as follows:
import torch
from sklearn.preprocessing import MinMaxScaler

# Hyperparameter settings
seq_length = 10    # time-step window length
input_size = 3     # was 3, then 5; postcode and time columns dropped
num_layers = 2     # 4
hidden_size = 128  # 512??
batch_size = 64
n_iters = 10000    # 50000 5000
lr = 0.0001
output_size = 1
split_ratio = 0.9
path = 'data/raw_sales.csv'
moudle = Net(input_size, hidden_size, num_layers, output_size, batch_size, seq_length)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(moudle.parameters(), lr=lr)
scaler = MinMaxScaler()
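The training loop is the usual Adam/MSE pattern, roughly like the sketch below (train_x / train_y are placeholder tensors standing in for the windows actually built from raw_sales.csv; the real data-loading code is left out here):

    moudle = moudle.to('cuda')  # forward() creates h_0/c_0 on 'cuda'
    train_x = torch.randn(batch_size, seq_length, input_size, device='cuda')  # placeholder batch
    train_y = torch.randn(batch_size, device='cuda')                          # placeholder targets

    for it in range(n_iters):
        optimizer.zero_grad()
        pred = moudle(train_x)           # (batch_size,)
        loss = criterion(pred, train_y)  # MSE loss
        loss.backward()
        optimizer.step()
        if it % 1000 == 0:
            print(f'iter {it}, loss {loss.item():.6f}')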
The resulting plot looks like this:

No matter how I run it, the prediction comes out as a flat line. I'm completely stuck!