Why does the training loss fail to decrease, and even tend to rise?
import torch
import numpy as np
from sklearn import datasets
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader

# Load the Iris data: 150 samples, 4 features, 3 classes.
x = datasets.load_iris().data
y = datasets.load_iris().target
x = torch.from_numpy(np.float32(x))
# cross_entropy expects integer class indices, so keep the labels as int64, not float32.
y = torch.from_numpy(y).long()

# Iris is sorted by class, so shuffle before splitting into train/test sets;
# otherwise the last 30 samples would all belong to a single class.
perm = torch.randperm(len(x))
x, y = x[perm], y[perm]
x_data, x_test = x[:120], x[120:]
# The labels must come from y, not from x.
y_data, y_test = y[:120], y[120:]

train_set = TensorDataset(x_data, y_data)
train_loader = DataLoader(train_set, batch_size=16, shuffle=True)
test_set = TensorDataset(x_test, y_test)
test_loader = DataLoader(test_set, batch_size=16)
class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.hidden = torch.nn.Linear(4, 16)
        # Iris has 3 classes, so the output layer should produce 3 logits, not 4.
        self.out = torch.nn.Linear(16, 3)

    def forward(self, x):
        x = torch.nn.functional.relu(self.hidden(x))
        return self.out(x)
model = MyModel()
lr = 0.001
cost = torch.nn.functional.cross_entropy
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
def dataset_loss(loader):
    """Average cross-entropy per sample over a DataLoader (no gradients needed)."""
    total, n = 0.0, 0
    with torch.no_grad():
        for data, label in loader:
            # Accumulate the summed loss so the average is not skewed by the last, smaller batch.
            total += cost(model(data), label, reduction='sum').item()
            n += len(data)
    return total / n
# Call model.train() during training so that BatchNormalization and Dropout behave normally.
# Call model.eval() during evaluation so that BatchNormalization and Dropout are switched off
# (see the short demo after this listing).
for epoch in range(100):
    model.train()
    # Train on the training loader, not on the test data.
    for data, label in train_loader:
        optimizer.zero_grad()
        batch_loss = cost(model(data), label)
        batch_loss.backward()
        optimizer.step()
    model.eval()
    if (epoch + 1) % 10 == 0:
        print('train loss {:.4f}\ttest loss {:.4f}'.format(
            dataset_loss(train_loader), dataset_loss(test_loader)))
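Regarding the train()/eval() comments above: MyModel itself contains no BatchNorm or Dropout layer, so switching modes does not change its behavior here. A minimal sketch with a standalone Dropout layer (hypothetical, not part of the model above) shows what the switch actually does:

import torch

# Hypothetical standalone Dropout layer, only for illustration.
drop = torch.nn.Dropout(p=0.5)
x = torch.ones(1, 8)

drop.train()    # training mode: roughly half of the elements are zeroed, the rest scaled by 2
print(drop(x))  # output changes from call to call

drop.eval()     # eval mode: Dropout becomes a no-op
print(drop(x))  # output is always equal to x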
The output of the training script above is as follows: