以下是我的代码,想问一下运行后为何出现如图所示错误?(NotImplementedError: Module [FlattenLayer] is missing the required "forward" function)该如何修改?
import torchvision
import torchvision.transforms as transforms
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
sys.path.append("C:/Users/zyx20/Desktop/深度学习编程/pythonProject")
import d2lzh_pytorch as d2l
batch_size = 256
if sys.platform.startswith('win'):
num_workers = 0 # 0表示不用额外的进程来加速读取数据
else:
num_workers = 4
mnist_train = torchvision.datasets.FashionMNIST(root='C:/Users/zyx20/Desktop/深度学习编程/MNIST/raw', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='C:/Users/zyx20/Desktop/深度学习编程/MNIST/raw', train=False, download=True, transform=transforms.ToTensor())
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
num_inputs=784
num_outputs=10
class LinearNet(nn.Module):
def __init__(self,num_inputs,num_outputs):
super(LinearNet,self).__init__()
self.linear=nn.Linear(num_inputs,num_outputs)
def forward(self,x):
y=self.linear(x.view(x.shape[0],-1))
return y
net=LinearNet(num_inputs,num_outputs)
class FlattenLayer(nn.Module):
def __init__(self):
super(FlattenLayer,self).__init__()
def foward(self,x):
return x.view(x.shape[0],-1)
from collections import OrderedDict
net=nn.Sequential(
#FlattenLayer(),
#nn.Linear(num_inputs,num_outputs)
OrderedDict([('flatten',FlattenLayer()),
('linear',nn.Linear(num_inputs,num_outputs))])
)
init.normal_(net.linear.weight,mean=0,std=0.01)
init.constant_(net.linear.bias,val=0)
#定义交叉熵损失函数
loss=nn.CrossEntropyLoss()
#定义优化算法
optimizer=torch.optim.SGD(net.parameters(),lr=0.1)
#训练模型
num_epochs=5
def train_ch3(net,train_iter,test_iter,loss,num_epochs,batch_size,params=None,lr=None,optimizer=None):
for epoch in range(num_epochs):
train_l_sum,train_acc_sum,n=0.0,0.0,0
for X,y in train_iter:
y_hat=net(X)
l=loss(y_hat,y).sum()
#梯度清零
if optimizer is not None:
optimizer.zero_grad()
elif params is not None and params[0].grad is not None:
for param in params:
param.grad.data.zero_()
l.backward()
if optimizer is None:
d2l.sgd(params,lr,batch_size)
else:
optimizer.step()
train_l_sum+=l.item()
train_acc_sum+=(y_hat.argmax(dim=1)==y).sum().item()
n+=y.shape[0]
test_acc=evaluate_accuracy(test_iter,net)
print('epoch %d,loss %.4f,train acc %.3f,test acc %.3f'%(epoch+1,train_l_sum/n,train_acc_sum/n,test_acc))
train_ch3(net,train_iter,test_iter,loss,num_epochs,batch_size,None,None)