以下是我的代码,运行后出现RuntimeError: Given groups=1, weight of size [32, 16, 3, 3], expected input[64, 32, 56, 56] to have 16 channels, but got 32 channels instead的错误,请问是哪里的通道数设置有问题?该如何修改呢?
import time
import torch
from torch import nn,optim
import torchvision
import sys
sys.path.append("C:/Users/zyx20/Desktop/深度学习编程/pythonProject")
import d2lzh_pytorch as d2l
device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#使用vgg—block函数来实现这个基础的VGG块
def vgg_block(num_convs,in_channels,out_channels):
blk=[]
for i in range(num_convs):
if i==0:
blk.append(nn.Conv2d(in_channels,out_channels,kernel_size=3,padding=1))
else:
blk.append(nn.Conv2d(in_channels,out_channels,kernel_size=3,padding=1))
blk.append(nn.ReLU())
blk.append(nn.MaxPool2d(kernel_size=2,stride=2))
return nn.Sequential(*blk)
#构造VGG网络
conv_arch=((1,1,64),(1,64,128),(2,128,256),(2,256,512),(2,512,512))
fc_features=512*7*7
fc_hiddens_units=4096
def vgg(conv_arch,fc_features,fc_hiddens_units=4096):
net=nn.Sequential()
#卷积层部分
for i,(num_convs,in_channels,out_channels) in enumerate(conv_arch):
#每经过一个vgg_block都使宽高减半
net.add_module("vgg_block_"+str(i+1),vgg_block(num_convs,in_channels,out_channels))
#全连接层部分
net.add_module("fc",nn.Sequential(d2l.FlattenLayer(),nn.Linear(fc_features,fc_hiddens_units),nn.ReLU(),nn.Dropout(0.5),
nn.Linear(fc_hiddens_units,fc_hiddens_units),
nn.ReLU(),
nn.Dropout(0.5),
nn.Linear(fc_features,10)))
return net
#获取数据和训练模型
ratio=8
small_conv_arch=[(1,1,64//ratio),(1,64//ratio,128//ratio),(2,128//ratio,256//ratio),(2,256//ratio,512//ratio),(2,512//ratio,512//ratio)]
net=vgg(small_conv_arch,fc_features//ratio,fc_hiddens_units//ratio)
batch_size=64
train_iter,test_iter=d2l.load_data_fashion_mnist(batch_size,resize=224)
#训练
lr,num_epochs=0.001,5
optimizer=torch.optim.Adam(net.parameters(),lr=lr)
d2l.train_ch5(net,train_iter,test_iter,num_epochs,batch_size,optimizer,device,num_epochs)