# NOTE: model.pth reported "image not loaded"; try opening the image externally to fix the format issue.
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
from PIL import Image
# Hyperparameters
BATCH_SIZE = 60
EPOCHS = 60
model_lr = 0.005
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Preprocessing: resize to 150x150, convert to tensor,
# and normalize each RGB channel from [0, 1] to [-1, 1].
transform = transforms.Compose([
    transforms.Resize([150, 150]),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
# Load the train/validation sets (ImageFolder: one subdirectory per class).
# NOTE(review): absolute Windows paths — assumes this exact local layout.
dataset_train = datasets.ImageFolder(r'D:\代码\Python\深度分析\深度学习\实验一\datasets\train', transform)
print(dataset_train.imgs)
print(dataset_train.class_to_idx)
dataset_test = datasets.ImageFolder(r'D:\代码\Python\深度分析\深度学习\实验一\datasets\validation', transform)
print(dataset_test.class_to_idx)
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)
class ConvNet(nn.Module):
    """Simple CNN for 3-class classification of 150x150 RGB images.

    Six conv layers with ReLU activations, interleaved with four 2x2
    max-pool stages, followed by two fully connected layers.
    Input: (N, 3, 150, 150) -> Output: raw class logits of shape (N, 3).
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)     # 150 -> 148
        self.max_pool1 = nn.MaxPool2d(2)     # 148 -> 74
        self.conv2 = nn.Conv2d(32, 64, 3)    # 74 -> 72
        self.max_pool2 = nn.MaxPool2d(2)     # 72 -> 36
        self.conv3 = nn.Conv2d(64, 64, 3)    # 36 -> 34
        self.conv4 = nn.Conv2d(64, 64, 3)    # 34 -> 32
        self.max_pool3 = nn.MaxPool2d(2)     # 32 -> 16
        self.conv5 = nn.Conv2d(64, 128, 3)   # 16 -> 14
        self.conv6 = nn.Conv2d(128, 128, 3)  # 14 -> 12
        self.max_pool4 = nn.MaxPool2d(2)     # 12 -> 6
        self.relu = nn.ReLU(inplace=True)
        self.fc0 = nn.Flatten()
        self.fc1 = nn.Linear(4608, 512)      # 128 * 6 * 6 = 4608
        self.fc2 = nn.Linear(512, 3)

    def forward(self, x):
        """Forward pass; returns unnormalized class logits of shape (N, 3)."""
        # Fix: the original stacked conv/fc layers with no activation,
        # leaving the network (apart from max-pooling) purely linear;
        # ReLU restores the non-linearity a CNN needs to learn features.
        x = self.max_pool1(self.relu(self.conv1(x)))
        x = self.max_pool2(self.relu(self.conv2(x)))
        x = self.relu(self.conv3(x))
        x = self.max_pool3(self.relu(self.conv4(x)))
        x = self.relu(self.conv5(x))
        x = self.max_pool4(self.relu(self.conv6(x)))
        x = self.fc0(x)
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# Instantiate the model and move it to the selected device.
model = ConvNet().to(DEVICE)
print(model)
# Fix: the original called model.eval() here, which is misleading before
# training (train() switches the model back to training mode anyway), and
# duplicated the CUDA check that DEVICE already encodes.
optimizer = optim.Adam(model.parameters(), lr=model_lr)
criterion = nn.CrossEntropyLoss()
def adjust_learning_rate(optimizer, epoch, base_lr=None):
    """Decay the learning rate by a factor of 10 every 30 epochs.

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: current (1-based) epoch number.
        base_lr: starting learning rate; defaults to the module-level
            ``model_lr``, so existing callers keep their behavior.
    """
    if base_lr is None:
        base_lr = model_lr
    new_lr = base_lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
def train(model, device, train_loader, optimizer, epoch, criterion=None):
    """Run one training epoch; print periodic progress and the average loss.

    Args:
        model: network to train (switched to training mode here).
        device: device each batch is moved to.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch number (used only for logging).
        criterion: loss function; defaults to CrossEntropyLoss.  The
            original read a module-level ``criterion`` global; taking it
            as a parameter removes that hidden dependency and stays
            backward compatible.
    """
    if criterion is None:
        criterion = nn.CrossEntropyLoss()
    model.train()
    total_loss = 0
    total_samples = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        # Log every 50th batch to keep output readable.
        if (batch_idx + 1) % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), total_samples,
                100. * (batch_idx + 1) / len(train_loader), loss.item()))
    average_loss = total_loss / len(train_loader)
    print('Epoch: {}, Average Loss: {:.4f}'.format(epoch, average_loss))
def val(model, device, test_loader, criterion=None):
    """Evaluate on the validation set; print and return the accuracy.

    Args:
        model: network to evaluate (switched to eval mode here).
        device: device each batch is moved to.
        test_loader: DataLoader yielding (data, target) batches.
        criterion: loss function; defaults to CrossEntropyLoss (the
            original read a module-level global).

    Returns:
        Accuracy over the whole set as a percentage in [0, 100].
        (Returning it is new but backward compatible: the original
        returned None and its caller ignores the result.)
    """
    if criterion is None:
        criterion = nn.CrossEntropyLoss()
    model.eval()
    test_loss = 0
    correct = 0
    total_samples = len(test_loader.dataset)
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            test_loss += loss.item()
            _, pred = torch.max(output, 1)
            correct += torch.sum(pred == target).item()
    test_loss /= len(test_loader)
    accuracy = correct / total_samples * 100
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, total_samples, accuracy))
    return accuracy
# Train for EPOCHS epochs, decaying the LR and validating after each epoch.
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, test_loader)
# Persist only the learned weights (state dict), not the whole module.
torch.save(model.state_dict(), 'model.pth')
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.autograd import Variable
# Hyperparameters
BATCH_SIZE = 60
EPOCHS = 60
modellr = 0.005
# Dataset locations (ImageFolder layout: one subdirectory per class).
# NOTE(review): absolute Windows paths — assumes this exact local layout.
train_data_path = r'D:\代码\Python\深度分析\深度学习\实验一\datasets\train'
test_data_path = r'D:\代码\Python\深度分析\深度学习\实验一\datasets\validation'
# Data preprocessing
def preprocess_data():
    """Return the image transform pipeline: resize to 150x150, convert
    to tensor, and normalize each RGB channel from [0, 1] to [-1, 1]."""
    steps = [
        transforms.Resize([150, 150]),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ]
    return transforms.Compose(steps)
# Model selection
def select_model(num_classes=3):
    """Build a ResNet-18 classifier sized for this dataset.

    Fix: the stock ``models.resnet18()`` head outputs 1000 ImageNet
    classes, but this script's dataset has 3 classes, so the final fully
    connected layer is replaced.  The original also called eval() here,
    which is misleading for a model about to be trained (train() sets
    training mode anyway).

    Args:
        num_classes: size of the classification head (default 3,
            matching the dataset used by this script).
    """
    resnet = models.resnet18()
    resnet.fc = nn.Linear(resnet.fc.in_features, num_classes)
    if torch.cuda.is_available():
        resnet = resnet.cuda()
    return resnet
# Training function
def train(model, device, train_loader, optimizer, criterion, epoch):
    """Run one training epoch; print periodic progress and the average loss.

    Args:
        model: network to train (switched to training mode here).
        device: device each batch is moved to.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepped once per batch.
        criterion: loss function applied to (output, target).
        epoch: current epoch number (used only for logging).
    """
    model.train()
    sum_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        # Fix: dropped the deprecated Variable wrapper (a no-op since
        # PyTorch 0.4); .to(device) on the tensors is all that is needed.
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        sum_loss += loss.item()
        # Log every 50th batch to keep output readable.
        if (batch_idx + 1) % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item()))
    ave_loss = sum_loss / len(train_loader)
    print('Epoch: {}, Loss: {}'.format(epoch, ave_loss))
# Validation function
def validate(model, device, test_loader, criterion):
    """Evaluate the model; print average loss and accuracy.

    Fixes: dropped the deprecated Variable wrapper and the deprecated
    ``.data`` accesses; ``correct`` is accumulated as a plain int via
    ``.item()``.  Also returns the accuracy (new but backward compatible
    — the original returned None and its caller ignores the result).

    Args:
        model: network to evaluate (switched to eval mode here).
        device: device each batch is moved to.
        test_loader: DataLoader yielding (data, target) batches.
        criterion: loss function applied to (output, target).

    Returns:
        Accuracy over the whole set as a fraction in [0, 1].
    """
    model.eval()
    test_loss = 0
    correct = 0
    total_num = len(test_loader.dataset)
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            _, pred = torch.max(output, 1)
            correct += torch.sum(pred == target).item()
            test_loss += loss.item()
    accuracy = correct / total_num
    avg_loss = test_loss / len(test_loader)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
          format(avg_loss, correct, len(test_loader.dataset), 100 * accuracy))
    return accuracy
def adjust_learning_rate(optimizer, epoch, base_lr=None):
    """Decay the learning rate by a factor of 10 every 5 epochs.

    Fix: the original docstring claimed decay every 30 epochs, but the
    code divides by 5; the documentation now matches the behavior.

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: current (1-based) epoch number.
        base_lr: starting learning rate; defaults to the module-level
            ``modellr``, so existing callers keep their behavior.
    """
    if base_lr is None:
        base_lr = modellr
    model_lr_new = base_lr * (0.1 ** (epoch // 5))
    for param_group in optimizer.param_groups:
        param_group['lr'] = model_lr_new
if __name__ == '__main__':
    # Build the preprocessing pipeline and the image-folder datasets.
    transform = preprocess_data()
    dataset_train = datasets.ImageFolder(train_data_path, transform)
    dataset_test = datasets.ImageFolder(test_data_path, transform)
    print(dataset_train.imgs)
    print(dataset_train.class_to_idx)
    print(dataset_test.class_to_idx)
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)
    model = select_model()
    optimizer = optim.Adam(model.parameters(), lr=modellr)
    criterion = nn.CrossEntropyLoss()
    # Train, adjusting the LR each epoch and validating after each epoch.
    for epoch in range(1, EPOCHS + 1):
        adjust_learning_rate(optimizer, epoch)
        train(model, torch.device('cuda' if torch.cuda.is_available() else 'cpu'), train_loader, optimizer, criterion,
              epoch)
        validate(model, torch.device('cuda' if torch.cuda.is_available() else 'cpu'), test_loader, criterion)
    # NOTE(review): this pickles the entire model object (unlike the
    # state_dict save earlier in the file); loading requires the same
    # class definitions/module paths — consider saving state_dict instead.
    torch.save(model, 'model.pth')