代码报错如下
Traceback (most recent call last):
File "/root/autodl-tmp/project/twst2.py", line 274, in <module>
loss, r2, MAE = train(model, optimizer, criterion, batch_data, batch_label)
File "/root/autodl-tmp/project/twst2.py", line 117, in train
SSR = ((output - train_label) ** 2).sum().cpu().detach().numpy()
File "/root/miniconda3/lib/python3.8/site-packages/torch/tensor.py", line 630, in __array__
return self.numpy()
RuntimeError: Can't call numpy() on Tensor that requires grad. Use tensor.detach().numpy() instead.
相关代码如下:
def train(model, optimizer, criterion, train_data, train_label):
# train_label,train_data = train_label.to(device), train_data.to(device)
model.train()
optimizer.zero_grad()
output = model(train_data)
output = output.squeeze()
train_label = train_label.astype(float)
train_label_tensor = torch.tensor(train_label, dtype=torch.float32).to(device)
#train_label = train_label.float()xiugai
mean_train_label = torch.mean(train_label_tensor)
SST = ((train_label_tensor - mean_train_label) ** 2).sum()
# SST = ((train_label - torch.mean(train_label)) ** 2).sum()
SSR = ((output - train_label) ** 2).sum().cpu().detach().numpy()
#SSR = SSR.detach().cpu().numpy()
r2 = 1 - SSR / SST
MAE = torch.mean(torch.abs(train_label - output))
loss = criterion(output, train_label)
# loss = sum(loss_num)/len(loss_num)
loss.backward()
optimizer.step()
loss_num = torch.sqrt(loss)
# r2 = torch.tensor(r2)
return loss_num, r2, MAE
batch_size = 256
num_batches = len(train_data) // batch_size
# test_num_batches = len(test_data) // batch_size\
for epoch in range(num_epochs):
train_loss_sum = np.zeros(1)
tr_mae_sum = np.zeros(1)
tr_r2_sum = np.zeros(1)
for i in range(num_batches):
start_idx = i * batch_size
end_idx = (i + 1) * batch_size
batch_data = train_data[start_idx:end_idx].unsqueeze(1)
batch_label = train_label[start_idx:end_idx]
# train_loss.append(train(model, optimizer, criterion, batch_data, batch_label))
loss, r2, MAE = train(model, optimizer, criterion, batch_data, batch_label)
train_loss = loss.cpu().detach().numpy()
train_r2 = r2.cpu().detach().numpy()
train_MAE = MAE.cpu().detach().numpy()
# train_loss.append(train_loss)
train_loss_sum = np.array(train_loss_sum) + train_loss
tr_r2_sum = np.array(tr_r2_sum) + train_r2
tr_mae_sum = np.array(tr_mae_sum) + train_MAE
train_r2_mean = tr_r2_sum / num_batches
train_loss_mean = train_loss_sum / num_batches
train_mae_mean = tr_mae_sum / num_batches
with torch.no_grad():
val_outputs = model(vali_data.unsqueeze(1))
val_output = val_outputs.squeeze()
vali_label = vali_label.float()
val_loss = criterion(val_output, vali_label)
val_loss = torch.sqrt(val_loss)
SST = ((vali_label - torch.mean(vali_label)) ** 2).sum()
SSR = ((val_output - vali_label) ** 2).sum()
val_r2 = 1 - SSR / SST
val_mae = torch.mean(torch.abs(vali_label - val_output))
print(f'Epoch [{epoch + 1}/{num_epochs}], Train Loss: {train_loss_mean.item()}, Train r2: {train_r2_mean.item()}, Train mae: {train_mae_mean.item()}, Val Loss: {val_loss.item()}, Val r2: {val_r2.item()}, Val mae: {val_mae.item()}')