以下是我的代码,运行后出现了'LeNet' object has no attribute 'paramerters'的错误,请问该如何修改呢?
import time
import torch
import torch
from torch import nn,optim
from torch.nn import init
import torchvision
import torchvision.transforms as transforms
import sys
sys.path.append("C:/Users/zyx20/Desktop/深度学习编程/pythonProject")
import d2lzh_pytorch as d2l
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size=256
if sys.platform.startswith('win'):
num_workers = 0 # 0表示不用额外的进程来加速读取数据
else:
num_workers = 4
mnist_train = torchvision.datasets.FashionMNIST(root='C:/Users/zyx20/Desktop/深度学习编程/MNIST/raw', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='C:/Users/zyx20/Desktop/深度学习编程/MNIST/raw', train=False, download=True, transform=transforms.ToTensor())
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
class LeNet(nn.Module):
def __init__(self):
super(LeNet,self).__init__()
self.conv=nn.Sequential(nn.Conv2d(1,6,5),
nn.Sigmoid(),
nn.MaxPool2d(2,2),
nn.Conv2d(6,16,5),
nn.Sigmoid(),
nn.MaxPool2d(2,2))
self.fc=nn.Sequential(
nn.Linear(16*4*4,120),
nn.Sigmoid(),
nn.Linear(120,84),
nn.Sigmoid(),
nn.Linear(84,10)
)
def foward(self,img):
feature=self.conv(img)
output=self.fc(feature.view(img.shape[0],-1))
return output
net=LeNet()
def evaluate_accuracy(data_iter,net,device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
acc_sum,n=0.0,0
with torch.no_grad():
for X,y in data_iter:
if isinstance(net,torch.nn.Module):
net.eval()#评估模式,这会关闭dropout
acc_sum+=(net(X.to(device)).argmax(dim=1)==y.to(device)).float().sum().cpu().item()
net.train()#改回训练模式
else:
if('is_training' in net.__code__.co_varnames):#如果有is_training这个参数
#将is_training设置为False
acc_sum+=(net(X,is_training=False).argmax(dim=1)==y).float().sum().cpu().item()
else:
acc_sum += (net(X).argmax(dim=1) == y).float().sum().cpu().item()
n += y.shape[0]
return acc_sum/n
def train_ch5(net,train_iter,test_iter,loss,num_epochs,batch_size,params=None,lr=None,optimizer=None):
net=net.to(device)
print("training on",device)
loss=torch.nn.CrossEntropyLoss
batch_count=0
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n,start = 0.0, 0.0, 0,time.time()
for X,y in train_iter:
y_hat = net(X)
X=X.to(device)
y=y.to(device)
l=loss(y_hat,y)
optimizer.zero_grad()
l.backward()
optimizer.step()
train_l_sum+=l.cpu().item()
train_acc_sum+=(y_hat.argmax(dim=1)==y).sum().cpu().item()
n += y.shape[0]
batch_count+=1
test_acc=evaluate_accuracy(test_iter,net)
print('epoch %d,loss %.4f,train acc %.3f,test acc %.3f%,time %.lf sec'%(epoch+1,train_l_sum/batch_count,train_acc_sum/n,test_acc,time.time()-start))
lr,num_epochs=0.001,5
optimizer=torch.optim.Adam(net.paramerters(),lr=lr)
train_ch5(net,train_iter,test_iter,num_epochs,batch_size,optimizer,device,num_epochs)
