在用LSTM预测轴承剩余寿命时,得到如标题的错误:RuntimeError: Input and parameter tensors are not the same dtype, found input tensor with Double and parameter tensor with Float,可是我检查了输入都是float类型,还请大神解决一下。源代码如下:
#导入库 from numpy import * import matplotlib.pyplot as plt import scipy.io as scio import matplotlib.pyplot as plt import numpy as np import torch from torch import nn from torch.autograd import Variable from visdom import Visdom #读取数据 #导入训练集数据,mat格式 dataFile1 ='D:/!研究生学习生涯/!剩余寿命预测/LSTM相关资料/LSTM预测股票代码/基于LSTM模型的股票预测模型/normal_rms_1-1.mat' data1 = scio.loadmat(dataFile1) # 读入数据 feature1=data1['D'] # 读取mat里面的RMS数据 print(feature1) print(len(feature1)) print(feature1.shape) labels1=[] #设置训练集标签 for i in range(0,len(feature1)): if i<12740: labels1.append(0) else: labels1.append(0+(i-12740)/(len(feature1)-12740)) #导入测试集数据 dataFile2 ='D:/!研究生学习生涯/!剩余寿命预测/LSTM相关资料/LSTM预测股票代码/基于LSTM模型的股票预测模型/normal_rms_1-2.mat' data2 = scio.loadmat(dataFile2) # 读入数据 feature2=data2['D'] # 读取mat里面的RMS数据 print(feature2) print(len(feature2)) print(feature2.shape) labels2=[] #设置测试集标签 for i in range(0,len(feature2)): if i<9155: labels2.append(0) else: labels2.append(0+(i-9155)/(len(feature2)-9155)) labels1=np.array(labels1) labels2=np.array(labels2) dataX=[] dataY=[] #数据预处理,用过去的20天数据去预测未来的1天数据。 def create_dataset1(dataset1, look_back=20): for i in range(len(dataset1) - look_back): a = dataset1[i:(i + look_back)] dataX.append(a) return np.array(dataX) def create_dataset2(dataset2, look_back=20,pre_steps=1): for i in range(len(dataset2) - look_back): b=dataset2[i + look_back:i + look_back + pre_steps] dataY.append(b) return np.array(dataY) train_x=create_dataset1(feature1) # train_x=torch.tensor(train_x).to('cuda:1',torch.float32) train_y=create_dataset2(labels1) train_x=train_x.reshape(-1,20,1) train_y=train_y.reshape(-1,1) # train_x=train_x.reshape(-1,1,20) # train_y=train_y.reshape(-1,1,1) # train_y= train_y.astype(float) # train_y=np.array(train_y) test_x=create_dataset1(feature2) test_x=test_x.reshape(-1,1,20) test_y=create_dataset2(labels2) # test_x=np.array(test_x) # test_y=np.array(test_y) #处理数据维度满足模型输入条件 train_x=torch.tensor(train_x).to('cuda',torch.float64).reshape(-1,20,1) test_x=torch.tensor(test_x).to('cuda',torch.float64).reshape(-1,20,1) # train_y=torch.tensor(train_y).to('cuda',torch.float32) train_y=torch.from_numpy(train_y).to('cuda',torch.float64) test_y=torch.tensor(test_y).to('cuda',torch.float64) # test_y=torch.tensor(test_y).to('cuda') #设置模型超参数 input_size=1 hidden_size=20 cidden_size=20 output_size=1 lr=0.01 num_layers=1 #定义模型 class lstm(nn.Module): def __init__(self,input_size=1,hidden_size=20,num_layers=1): super(lstm,self).__init__() self.rnn=nn.LSTM(input_size,hidden_size,num_layers,batch_first=True) self.fc = nn.Linear(hidden_size, output_size) def forward(self,x): # x = self.rnn(x) # x is input, size (seq_len, batch, input_size) # s, b, h = x[0].shape # x is output, size (seq_len, batch, hidden_size) # x = x[0].view(s * b, h) # x = self.fc(x) # x = x.view(s, b, -1) # 把形状改回来 x = self.rnn(x)[1][0].view(-1,1) return x net=lstm().to('cuda') # input = torch.randn(20124, 20, 1).to('cuda') # out = net(input) # # criterion=nn.MSELoss() optimizer=torch.optim.Adam(net.parameters(),lr) print(net) for ii in range(1000): # train_x=torch.Tensor(train_x).to('cuda') # train_y=torch.Tensor(train_y).to('cuda') # train_x=train_x.to(torch.float32) # train_y=train_y.to(torch.float32) var_x=Variable(train_x) var_y=Variable(train_y) #前向传播 out=net(var_x) loss=criterion(out,var_y).to('cuda') #反向传播 optimizer.zero_grad() loss.backward() optimizer.step() if (e+1)%100== 0: print('Epoch: {}, Loss: {:.10f}'.format(e + 1, loss.data)) net=net.eval()#转换为测试模式 # test_x=torch.Tensor(test_x) # test_x=test_x.to(torch.float32) varx=Variable(test_x) pre_test=net(varx) pre_test = pre_test.view(-1).data.numpy() pre_test = np.concatenate((np.zeros(), pre_test)) plt.plot(test_y,pre_test) plt.show()