cnn代码报错提问
报错信息如下:
/root/autodl-tmp/project/twst2.py:108: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor).
train_data = torch.tensor(train_data, device=device, dtype=torch.float32)
Traceback (most recent call last):
File "/root/autodl-tmp/project/twst2.py", line 279, in <module>
loss, r2, MAE = train(model, optimizer, criterion, batch_data, batch_label)
File "/root/autodl-tmp/project/twst2.py", line 121, in train
SSR = ((output - train_label) ** 2).sum().clone().detach().numpy()
File "/root/miniconda3/lib/python3.8/site-packages/torch/tensor.py", line 630, in __array__
return self.numpy()
RuntimeError: Can't call numpy() on Tensor that requires grad. Use tensor.detach().numpy() instead.
代码如下:
def train(model, optimizer, criterion, train_data, train_label):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_data = torch.tensor(train_data).to(device)
train_label = train_label.astype(np.float32)
train_label = torch.tensor(train_label).to(device)
train_label,train_data = train_label.to(device), train_data.to(device)
model.train()
optimizer.zero_grad()
output = model(train_data)
output = output.squeeze()
train_label = train_label.cpu().numpy().astype(float)
train_label_tensor = torch.tensor(train_label, dtype=torch.float32).to(device)
#train_label = train_label.float()xiugai
mean_train_label = torch.mean(train_label_tensor)
SST = ((train_label_tensor - mean_train_label) ** 2).sum()
SSR = ((output - train_label) ** 2).sum()..clone().detach().numpy()
SSR = SSR.detach().numpy()
r2 = 1 - SSR / SST
MAE = torch.mean(torch.abs(train_label - output))
loss = criterion(output, train_label)
# loss = sum(loss_num)/len(loss_num)
loss.backward()
optimizer.step()
loss_num = torch.sqrt(loss).detach().numpy
# r2 = torch.tensor(r2)
return loss_num, r2, MAE