孤殇兮 2024-05-07 19:24 采纳率: 86.7%
浏览 8

关于#Python#的问题,如何解决?(相关搜索:深度学习|卷积神经网络)

model.pth文件提示图像未加载,尝试外部打开以修正格式问题

img


import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
from PIL import Image

# Hyperparameters
BATCH_SIZE = 60
EPOCHS = 60
model_lr = 0.005
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Preprocessing: resize every image to 150x150, convert to a tensor,
# and normalize each RGB channel from [0, 1] to [-1, 1].
transform = transforms.Compose([
    transforms.Resize([150, 150]),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

# Load the datasets. ImageFolder expects one sub-directory per class under
# the given root; class_to_idx shows the resulting label mapping.
# NOTE(review): if an image "fails to load" here, the file itself is likely
# corrupt or not a real image — re-export it in a supported format.
dataset_train = datasets.ImageFolder(r'D:\代码\Python\深度分析\深度学习\实验一\datasets\train', transform)
print(dataset_train.imgs)
print(dataset_train.class_to_idx)

dataset_test = datasets.ImageFolder(r'D:\代码\Python\深度分析\深度学习\实验一\datasets\validation', transform)
print(dataset_test.class_to_idx)

# Shuffle only the training data; keep validation order deterministic.
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)


class ConvNet(nn.Module):
    """Simple CNN for 3-class classification of 150x150 RGB images.

    Input: (N, 3, 150, 150) tensor; output: (N, 3) raw logits
    (nn.CrossEntropyLoss applies log-softmax itself, so none is added here).

    FIX: the original forward pass had no activation functions at all, so
    the stacked convolutions collapsed into an (almost) linear map and the
    network could not learn non-linear features. ReLU is inserted after
    every conv layer and after fc1; parameter/state-dict keys are unchanged,
    so previously saved checkpoints still load.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        # Spatial sizes for a 150x150 input are noted per layer.
        self.conv1 = nn.Conv2d(3, 32, 3)     # 150 -> 148
        self.max_pool1 = nn.MaxPool2d(2)     # 148 -> 74
        self.conv2 = nn.Conv2d(32, 64, 3)    # 74  -> 72
        self.max_pool2 = nn.MaxPool2d(2)     # 72  -> 36
        self.conv3 = nn.Conv2d(64, 64, 3)    # 36  -> 34
        self.conv4 = nn.Conv2d(64, 64, 3)    # 34  -> 32
        self.max_pool3 = nn.MaxPool2d(2)     # 32  -> 16
        self.conv5 = nn.Conv2d(64, 128, 3)   # 16  -> 14
        self.conv6 = nn.Conv2d(128, 128, 3)  # 14  -> 12
        self.max_pool4 = nn.MaxPool2d(2)     # 12  -> 6
        self.fc0 = nn.Flatten()              # 128 * 6 * 6 = 4608
        self.fc1 = nn.Linear(4608, 512)
        self.fc2 = nn.Linear(512, 3)

    def forward(self, x):
        """Map a (N, 3, 150, 150) batch to (N, 3) class logits."""
        x = torch.relu(self.conv1(x))
        x = self.max_pool1(x)
        x = torch.relu(self.conv2(x))
        x = self.max_pool2(x)
        x = torch.relu(self.conv3(x))
        x = torch.relu(self.conv4(x))
        x = self.max_pool3(x)
        x = torch.relu(self.conv5(x))
        x = torch.relu(self.conv6(x))
        x = self.max_pool4(x)
        x = self.fc0(x)
        x = torch.relu(self.fc1(x))
        x = self.fc2(x)  # raw logits; CrossEntropyLoss handles softmax
        return x


# Instantiate the network and move it to the selected device.
model = ConvNet()
print(model)

# FIX: use .to(DEVICE) instead of a cuda()-only branch (DEVICE is computed
# above), and do NOT call model.eval() here — the model is about to be
# trained; train() and val() switch modes explicitly.
model = model.to(DEVICE)

optimizer = optim.Adam(model.parameters(), lr=model_lr)
criterion = nn.CrossEntropyLoss()


def adjust_learning_rate(optimizer, epoch, base_lr=0.005):
    """Decay the learning rate by a factor of 10 every 30 epochs.

    FIX: the base rate is now an explicit keyword parameter instead of a
    hidden dependency on the module-level ``model_lr`` global; the default
    matches the script's value, so existing two-argument calls behave the
    same.

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: 1-based current epoch number.
        base_lr: initial learning rate (default 0.005, i.e. ``model_lr``).
    """
    new_lr = base_lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr


def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch over `train_loader`.

    Moves each batch to `device`, takes one optimizer step per batch, and
    logs progress every 50 batches. Uses the module-level `criterion`.
    """
    model.train()
    running_loss = 0.0
    n_samples = len(train_loader.dataset)
    n_batches = len(train_loader)

    for step, (inputs, labels) in enumerate(train_loader, start=1):
        inputs = inputs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        batch_loss = criterion(model(inputs), labels)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()

        if step % 50 == 0:
            seen = step * len(inputs)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, n_samples, 100. * step / n_batches, batch_loss.item()))

    print('Epoch: {}, Average Loss: {:.4f}'.format(epoch, running_loss / n_batches))


def val(model, device, test_loader):
    """Evaluate the model on `test_loader` and print loss/accuracy.

    Runs in eval mode with gradients disabled. Uses the module-level
    `criterion`.
    """
    model.eval()
    loss_sum = 0.0
    n_correct = 0
    n_total = len(test_loader.dataset)

    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            loss_sum += criterion(logits, labels).item()
            preds = logits.argmax(dim=1)
            n_correct += (preds == labels).sum().item()

    avg_loss = loss_sum / len(test_loader)
    acc = n_correct / n_total * 100
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        avg_loss, n_correct, n_total, acc))


# Train and validate for EPOCHS epochs, decaying the LR along the way.
for epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, epoch)
    train(model, DEVICE, train_loader, optimizer, epoch)
    val(model, DEVICE, test_loader)

# Save only the learned parameters (the state dict), not the whole module;
# reload with: model = ConvNet(); model.load_state_dict(torch.load('model.pth')).
torch.save(model.state_dict(), 'model.pth')

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.autograd import Variable

# Hyperparameters (second, ResNet-based variant of the experiment)
BATCH_SIZE = 60
EPOCHS = 60
modellr = 0.005

# Dataset locations (ImageFolder layout: one sub-directory per class)
train_data_path = r'D:\代码\Python\深度分析\深度学习\实验一\datasets\train'
test_data_path = r'D:\代码\Python\深度分析\深度学习\实验一\datasets\validation'


# Data preprocessing
def preprocess_data():
    """Build the image preprocessing pipeline: 150x150 resize, tensor
    conversion, then per-channel normalization from [0, 1] to [-1, 1]."""
    steps = [
        transforms.Resize([150, 150]),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ]
    return transforms.Compose(steps)


# Model selection
def select_model(num_classes=3):
    """Build a ResNet-18 classifier adapted to this dataset.

    FIX: the stock resnet18 head has 1000 outputs (ImageNet), but this
    experiment has 3 classes — the final fully-connected layer is replaced
    with a ``num_classes``-way head. Also removed the premature ``.eval()``
    call: the model is trained immediately afterwards, and the train() /
    validate() functions set the mode explicitly.

    Args:
        num_classes: number of output classes (default 3, matching the
            3-class ImageFolder dataset used by this script).

    Returns:
        The (optionally CUDA-resident) ResNet-18 module.
    """
    resnet = models.resnet18()
    resnet.fc = nn.Linear(resnet.fc.in_features, num_classes)
    if torch.cuda.is_available():
        resnet = resnet.cuda()
    return resnet


# Training function
def train(model, device, train_loader, optimizer, criterion, epoch):
    """Run one training epoch over `train_loader`.

    FIX: dropped the long-deprecated ``torch.autograd.Variable`` wrapper —
    since PyTorch 0.4 plain tensors carry autograd state themselves, so
    ``.to(device)`` alone is correct.

    Args:
        model: network to train (switched to train mode here).
        device: torch.device the batches are moved to.
        train_loader: DataLoader yielding (images, labels) batches.
        optimizer: optimizer stepped once per batch.
        criterion: loss function, e.g. nn.CrossEntropyLoss().
        epoch: current epoch number (used for logging only).
    """
    model.train()
    sum_loss = 0
    total_num = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        sum_loss += loss.item()
        # Progress line every 50 batches.
        if (batch_idx + 1) % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), total_num,
                       100. * (batch_idx + 1) / len(train_loader), loss.item()))
    ave_loss = sum_loss / len(train_loader)
    print('Epoch: {}, Loss: {}'.format(epoch, ave_loss))


# Validation function
def validate(model, device, test_loader, criterion):
    """Evaluate the model on the validation set; print and return accuracy.

    FIX: dropped the deprecated ``torch.autograd.Variable`` wrapper and the
    ``.data`` access. Additionally returns the accuracy as a fraction in
    [0, 1] so callers can track it programmatically — the original returned
    None and only printed, so the extra return value is backward-compatible.

    Args:
        model: network to evaluate (switched to eval mode here).
        device: torch.device the batches are moved to.
        test_loader: DataLoader yielding (images, labels) batches.
        criterion: loss function used for the reported average loss.

    Returns:
        float: fraction of correctly classified samples.
    """
    model.eval()
    test_loss = 0
    correct = 0
    total_num = len(test_loader.dataset)
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            _, pred = torch.max(output, 1)
            correct += (pred == target).sum().item()
            test_loss += loss.item()
    accuracy = correct / total_num
    avg_loss = test_loss / len(test_loader)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
          format(avg_loss, correct, total_num, 100 * accuracy))
    return accuracy


def adjust_learning_rate(optimizer, epoch, base_lr=0.005):
    """Decay the learning rate by a factor of 10 every 5 epochs.

    FIX: the original docstring claimed a 30-epoch decay schedule while the
    code used ``epoch // 5``; the code's actual behavior is kept and the
    documentation corrected to match. The base rate is now an explicit
    keyword parameter (default equals the module-level ``modellr``) instead
    of a hidden global, so existing two-argument calls behave identically.

    Args:
        optimizer: optimizer whose param groups are updated in place.
        epoch: 1-based current epoch number.
        base_lr: initial learning rate (default 0.005).
    """
    new_lr = base_lr * (0.1 ** (epoch // 5))
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr


if __name__ == '__main__':
    # Build the shared preprocessing pipeline and the two ImageFolder
    # datasets (one sub-directory per class under each root path).
    transform = preprocess_data()
    dataset_train = datasets.ImageFolder(train_data_path, transform)
    dataset_test = datasets.ImageFolder(test_data_path, transform)

    # Debug output: sample list and class-name -> index mapping.
    print(dataset_train.imgs)
    print(dataset_train.class_to_idx)
    print(dataset_test.class_to_idx)

    # Shuffle only the training data; validation order stays deterministic.
    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)

    model = select_model()
    optimizer = optim.Adam(model.parameters(), lr=modellr)
    criterion = nn.CrossEntropyLoss()

    # Main loop: decay the LR, train one epoch, then validate.
    for epoch in range(1, EPOCHS + 1):
        adjust_learning_rate(optimizer, epoch)
        train(model, torch.device('cuda' if torch.cuda.is_available() else 'cpu'), train_loader, optimizer, criterion,
              epoch)
        validate(model, torch.device('cuda' if torch.cuda.is_available() else 'cpu'), test_loader, criterion)

    # NOTE(review): this pickles the ENTIRE module object, which is fragile
    # across code/torchvision versions; saving model.state_dict() (as the
    # first script does) is more portable and is the likely fix for the
    # "model.pth cannot be loaded" complaint in the question.
    torch.save(model, 'model.pth')
  • 写回答

2条回答 默认 最新

  • GISer Liu 2024-05-08 03:43
    关注

    该回答引用自GPT-3.5,由博主GISer Liu编写:

    根据问题描述,您遇到的主要问题是在训练卷积神经网络时,保存模型后再加载提示图像未加载,以及尝试外部打开以修正格式问题。我将分步解决您的问题:

    1. 模型保存与加载问题
    • 您使用了 torch.save(model.state_dict(), 'model.pth') 来保存模型参数。这是一种常见的保存模型的方法,但在加载模型时,需要确保模型的结构与保存时一致。
    • 为了解决这个问题,首先需要确保您的模型结构在加载时与保存时一致。这意味着您需要重新构建模型,并加载之前保存的参数。
    • 下面是解决方案的代码示例:
      python
      Copy code
      # 重新定义模型结构
      class ConvNet(nn.Module):
      def __init__(self):
      super(ConvNet, self).__init__()
      # 模型结构与您之前定义的一致
      # 构建模型并加载之前保存的参数
      model = ConvNet()
      model.load_state_dict(torch.load('model.pth'))
      
    2. 尝试外部打开以修正格式问题
    • 在描述中未提供足够的信息来理解这个问题的具体性质。通常情况下,如果您的模型保存和加载时没有遇到格式问题,那么这个警告可能不是致命的。但如果您能提供更多关于这个问题的信息,我将会更好地帮助您解决它。
      通过以上方法,您应该能够成功保存和加载您的模型,并在训练过程中避免出现图像未加载的问题。如果您有更多的问题或需要进一步的解释,请随时提问。

    如果该回答解决了您的问题,请采纳!如果没有,请私信联系或评论您的疑惑

    评论

报告相同问题?

问题事件

  • 创建了问题 5月7日

悬赏问题

  • ¥20 基于MSP430f5529的MPU6050驱动,求出欧拉角
  • ¥20 Java-Oj-桌布的计算
  • ¥15 powerbuilder中的datawindow数据整合到新的DataWindow
  • ¥20 有人知道这种图怎么画吗?
  • ¥15 pyqt6如何引用qrc文件加载里面的的资源
  • ¥15 安卓JNI项目使用lua上的问题
  • ¥20 RL+GNN解决人员排班问题时梯度消失
  • ¥60 要数控稳压电源测试数据
  • ¥15 能帮我写下这个编程吗
  • ¥15 ikuai客户端l2tp协议链接报终止15信号和无法将p.p.p6转换为我的l2tp线路