I'm working with a set of time-series data that has already been preprocessed. I built an LSTM in PyTorch for training and testing, but the results are poor and it runs very slowly. I found that the GPU is not being set up successfully, and I don't know how to change the code without getting errors. I'm hoping someone with experience can give me some pointers; I only started learning recently, this is for a course project, and it feels really hard!
1 answer
云哭了hia知道 2022-08-06 11:41

# Standardize temp01: x* = (x - mean) / std, then convert the training set to a tensor
# (train_temp01, test_temp01 and modified_ws come from the asker's preprocessing step)
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# Standardize the training data
train_data_temp01 = scaler.fit_transform(train_temp01.reshape(-1, 1))
train_data_temp01 = torch.FloatTensor(train_data_temp01).view(-1)  # to tensor

# u = modified_ws.mean()
# print(u)
# v = modified_ws.std()
# print(v)
# n = (modified_ws - u) / v
# print(n)

train_window = 128  # length of each input window (sliding window size)

def create_inout_sequences(input_data, tw):
    # Takes input_data and tw (the train_window); the label is the next value to predict
    inout_seq = []
    L = len(input_data)
    for i in range(L - tw):
        train_seq = input_data[i:i + tw]
        train_label = input_data[i + tw:i + tw + 1]
        inout_seq.append((train_seq, train_label))
    return inout_seq

# Build (sequence, label) pairs from the standardized training data and the window size
train_inout_seq = create_inout_sequences(train_data_temp01, train_window)

class LSTM(nn.Module):
    def __init__(self, input_size=1, hidden_layer_size=100, batch_size=128,
                 output_size=1, num_layers=2):
        super().__init__()
        self.hidden_layer_size = hidden_layer_size
        self.num_layers = num_layers   # not passed to nn.LSTM below, so the LSTM is single-layer
        self.lstm = nn.LSTM(input_size, hidden_layer_size)
        self.batch_size = batch_size
        self.batch_first = True        # attribute only; the LSTM still expects (seq_len, batch, feature)
        self.linear = nn.Linear(hidden_layer_size, output_size)
        self.hidden_cell = (torch.zeros(1, 1, self.hidden_layer_size),
                            torch.zeros(1, 1, self.hidden_layer_size))

    def forward(self, input_seq):
        lstm_out, self.hidden_cell = self.lstm(input_seq.view(len(input_seq), 1, -1),
                                               self.hidden_cell)
        predictions = self.linear(lstm_out.view(len(input_seq), -1))
        return predictions[-1]

model = LSTM()
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
print(model)

# Train the model; train_inout_seq holds the prepared training pairs
epochs = 10
for i in range(epochs):
    for seq, labels in train_inout_seq:
        optimizer.zero_grad()
        model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                             torch.zeros(1, 1, model.hidden_layer_size))
        y_pred = model(seq)
        single_loss = loss_function(y_pred, labels)  # loss
        single_loss.backward()                       # backpropagation
        optimizer.step()
    if i % 2 == 0:
        print(f'epoch: {i:3} loss: {single_loss.item():10.8f}')
        # print(f'epoch: {i:10} loss: {single_loss.item():10.10f}')

# Prediction
fut_pred = 6  # unused below; the loop actually predicts 128 steps
test_inputs = train_data_temp01[-train_window:].tolist()  # last train_window (128) values
print(test_inputs)

model.eval()
# Predict 128 future values, feeding each prediction back into the input window
for i in range(128):
    seq = torch.FloatTensor(test_inputs[-train_window:])
    with torch.no_grad():
        model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                             torch.zeros(1, 1, model.hidden_layer_size))
        test_inputs.append(model(seq).item())

# Inverse transform the predictions back to the original scale
actual_predictions = scaler.inverse_transform(np.array(test_inputs[train_window:]).reshape(-1, 1))
print(actual_predictions)

from sklearn.metrics import r2_score
import math
'''
a = test_temp01[20943:]
print("r2 score: ", r2_score(a, actual_predictions))
'''

x = np.arange(83971, 104964, 1)  # 20993 index values
# print(x)
plt.title('temp01 vs num')
plt.ylabel('temp01')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.plot(x, test_temp01, 'red', linewidth=0.3, label='true')          # ground truth: 20993 test values
plt.plot(x, actual_predictions, 'blue', linewidth=0.3, label='prd')   # predictions; must be the same length as x
plt.legend(loc='upper right')
plt.show()
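The code above runs entirely on the CPU, which is likely both why training feels slow and why the GPU settings mentioned in the question never take effect. Below is a minimal sketch of the standard PyTorch device pattern; it assumes the same model, loss_function, optimizer, epochs and train_inout_seq as in the snippet above and is only an illustration, not part of the original answer.

import torch

# Use the GPU if PyTorch can see one, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)  # prints 'cuda' when the GPU is usable, 'cpu' otherwise

model = model.to(device)  # move all model parameters to the chosen device

for i in range(epochs):
    for seq, labels in train_inout_seq:
        optimizer.zero_grad()
        # every tensor used in the forward pass must live on the same device as the model
        seq, labels = seq.to(device), labels.to(device)
        model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size, device=device),
                             torch.zeros(1, 1, model.hidden_layer_size, device=device))
        y_pred = model(seq)
        single_loss = loss_function(y_pred, labels)
        single_loss.backward()
        optimizer.step()

The usual "Expected all tensors to be on the same device" error comes from forgetting one of these .to(device) calls, most often on the hidden state or on the label tensor.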
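Independently of the device, feeding the network one window at a time is the other big source of slowness. Here is a rough sketch of mini-batch training with TensorDataset and DataLoader; the class name BatchLSTM and the batch size of 128 are illustrative choices, not from the original post, and the model is rewritten with batch_first=True so that whole batches go through in one call.

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class BatchLSTM(nn.Module):
    # hypothetical batched variant of the LSTM above
    def __init__(self, input_size=1, hidden_layer_size=100, output_size=1):
        super().__init__()
        self.lstm = nn.LSTM(input_size, hidden_layer_size, batch_first=True)
        self.linear = nn.Linear(hidden_layer_size, output_size)

    def forward(self, x):                  # x: (batch, seq_len, 1)
        out, _ = self.lstm(x)              # out: (batch, seq_len, hidden)
        return self.linear(out[:, -1, :])  # predict from the last time step -> (batch, 1)

# stack the (sequence, label) pairs produced by create_inout_sequences into two tensors
seqs = torch.stack([s for s, _ in train_inout_seq]).unsqueeze(-1)  # (N, 128, 1)
labs = torch.stack([l for _, l in train_inout_seq])                # (N, 1)
loader = DataLoader(TensorDataset(seqs, labs), batch_size=128, shuffle=True)

model = BatchLSTM().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_function = nn.MSELoss()

for epoch in range(10):
    for xb, yb in loader:
        xb, yb = xb.to(device), yb.to(device)
        optimizer.zero_grad()
        loss = loss_function(model(xb), yb)
        loss.backward()
        optimizer.step()

This keeps the GPU busy with 128 windows at a time instead of one, which is usually where most of the speed-up comes from.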