tjdnbj 2024-02-27 11:16

Question about the LeNet model from Dive into Deep Learning

Below is my code. When I run it, I get the error 'LeNet' object has no attribute 'paramerters'. How should I modify it?

import time
import torch
from torch import nn,optim
from torch.nn import init
import torchvision
import torchvision.transforms as transforms
import sys
sys.path.append("C:/Users/zyx20/Desktop/深度学习编程/pythonProject")
import d2lzh_pytorch as d2l
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')

batch_size=256
if sys.platform.startswith('win'):
    num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
else:
    num_workers = 4

mnist_train = torchvision.datasets.FashionMNIST(root='C:/Users/zyx20/Desktop/深度学习编程/MNIST/raw', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='C:/Users/zyx20/Desktop/深度学习编程/MNIST/raw', train=False, download=True, transform=transforms.ToTensor())
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)

class LeNet(nn.Module):
    def __init__(self):
        super(LeNet,self).__init__()
        self.conv=nn.Sequential(nn.Conv2d(1,6,5),
                                nn.Sigmoid(),
                                nn.MaxPool2d(2,2),
                                nn.Conv2d(6,16,5),
                                nn.Sigmoid(),
                                nn.MaxPool2d(2,2))
        self.fc=nn.Sequential(
            nn.Linear(16*4*4,120),
            nn.Sigmoid(),
            nn.Linear(120,84),
            nn.Sigmoid(),
            nn.Linear(84,10)
        )
    def foward(self,img):
        feature=self.conv(img)
        output=self.fc(feature.view(img.shape[0],-1))
        return output
net=LeNet()

def evaluate_accuracy(data_iter,net,device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    acc_sum,n=0.0,0
    with torch.no_grad():
        for X,y in data_iter:
            if isinstance(net,torch.nn.Module):
                net.eval()  # evaluation mode; this disables dropout
                acc_sum+=(net(X.to(device)).argmax(dim=1)==y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:
                if('is_training' in net.__code__.co_varnames):  # if the function takes an is_training argument
                    # set is_training to False
                    acc_sum+=(net(X,is_training=False).argmax(dim=1)==y).float().sum().cpu().item()
                else:
                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().cpu().item()
            n += y.shape[0]
    return acc_sum/n

def train_ch5(net,train_iter,test_iter,loss,num_epochs,batch_size,params=None,lr=None,optimizer=None):
    net=net.to(device)
    print("training on",device)
    loss=torch.nn.CrossEntropyLoss
    batch_count=0
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n,start = 0.0, 0.0, 0,time.time()
        for X,y in train_iter:
            y_hat = net(X)
            X=X.to(device)
            y=y.to(device)
            l=loss(y_hat,y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum+=l.cpu().item()
            train_acc_sum+=(y_hat.argmax(dim=1)==y).sum().cpu().item()
            n += y.shape[0]
            batch_count+=1
        test_acc=evaluate_accuracy(test_iter,net)
        print('epoch %d,loss %.4f,train acc %.3f,test acc %.3f%,time %.lf sec'%(epoch+1,train_l_sum/batch_count,train_acc_sum/n,test_acc,time.time()-start))

lr,num_epochs=0.001,5
optimizer=torch.optim.Adam(net.paramerters(),lr=lr)
train_ch5(net,train_iter,test_iter,num_epochs,batch_size,optimizer,device,num_epochs)


3 answers

  • GISer Liu 2024-02-27 11:23

    This answer draws on GPT-3.5 and was written up by blogger GISer Liu:

    Problem analysis: the error "'LeNet' object has no attribute 'paramerters'" means that when torch.optim.Adam is constructed, the code tries to access the net object's parameters attribute but misspells the name.
    Solution:

    1. Fix the spelling error in the code: change paramerters to parameters.
    2. Make sure the correct argument is passed when creating the optimizer.
      The corrected line looks like this:
      optimizer=torch.optim.Adam(net.parameters(), lr=lr)
      
      With this change the Adam optimizer is created correctly and optimizes the model's parameters; a short standalone sketch follows.
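
      For reference, here is a minimal standalone sketch (independent of the asker's dataset and LeNet class; the tiny nn.Sequential model is only a hypothetical stand-in) showing that parameters() is the method nn.Module actually provides, while any other spelling is resolved as a missing attribute and raises exactly this kind of AttributeError:

      import torch
      from torch import nn

      # Tiny stand-in model (hypothetical; any nn.Module subclass behaves the same way)
      model = nn.Sequential(nn.Linear(4, 2))

      # parameters() is the method nn.Module defines; the optimizer iterates over it
      optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
      print(sum(p.numel() for p in model.parameters()))  # 10 learnable scalars (4*2 weights + 2 biases)

      # Any other spelling is looked up as an ordinary attribute and fails
      try:
          model.paramerters()
      except AttributeError as err:
          print(err)  # ... object has no attribute 'paramerters'

      On the asker's LeNet instance the message names 'LeNet' instead of 'Sequential', but the cause and the fix are the same.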

    If this answer solves your problem, please accept it! If not, please point out what still needs fixing.


    This answer was selected by the asker as the accepted answer.


Question timeline

  • Closed by the system on March 9
  • Answer accepted on March 1
  • Question created on February 27