Batch size question: with batch_size = 1, 32, or 64, training is very slow and the loss is huge, reaching the thousands.
The data has shape torch.Size([5152, 1, 2000]). With batch_size = len(model.encoder(train_data)) = 5152, i.e. the entire dataset as a single batch,
training becomes much faster and the loss drops a lot, but the result is still poor: the best it reaches is around loss = 2.
Also, for the fully connected layer nn.Linear(1998, 256): can the 256 be set to any value, or does it have to correspond to the input? (See the shape check after the model code below.)
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

class autoencoder(nn.Module):
    def __init__(self):
        super(autoencoder, self).__init__()
        # Conv1d(1, 1, 3, stride=1, padding=0): length 2000 -> 1998,
        # so the Linear layer's in_features must be 1998 to match.
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 1, 3, 1, padding=0),
            nn.ReLU(),
            nn.Linear(1998, 256))
        # The decoder mirrors the encoder: 256 -> 1998 -> 2000.
        self.decoder = nn.Sequential(
            nn.Linear(256, 1998),
            nn.ConvTranspose1d(1, 1, 3, 1, padding=0),
            nn.Sigmoid())

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
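On the nn.Linear(1998, 256) question: the 1998 is not free. It must equal the Conv1d output length, floor((L_in + 2*padding - kernel_size)/stride) + 1 = (2000 - 3)/1 + 1 = 1998. The 256, by contrast, is a free choice of bottleneck width; any size works as long as the decoder's first Linear mirrors it with nn.Linear(256, 1998). A quick sanity check with untrained weights (the dummy batch of 4 is arbitrary):

model = autoencoder()
x = torch.randn(4, 1, 2000)   # dummy mini-batch of 4 signals
z = model.encoder(x)
print(z.shape)                # torch.Size([4, 1, 256])  -- the bottleneck
out = model(x)
print(out.shape)              # torch.Size([4, 1, 2000]) -- matches the input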
num_epochs = 100
model = autoencoder()   # the model must exist before the DataLoader line can reference it
# len(model.encoder(train_data)) just equals len(train_data) = 5152,
# i.e. the whole dataset as a single batch.
dataloader = DataLoader(train_data, batch_size=len(train_data), shuffle=True)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01,
                             weight_decay=1e-5)
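For the small-batch runs mentioned at the top, only the batch_size argument changes; passing the raw tensor to DataLoader works because a tensor supports len() and indexing. A sketch of that variant, with 64 as one of the sizes tried:

# Mini-batch variant: ceil(5152 / 64) = 81 batches per epoch instead of one
# full-dataset batch, so total_loss below sums 81 values instead of 1.
dataloader = DataLoader(train_data, batch_size=64, shuffle=True)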
for epoch in range(num_epochs):
    total_loss = 0
    for data in dataloader:
        x = data  # Variable() is deprecated; plain tensors track gradients
        # ===================forward=====================
        output = model(x)
        loss = criterion(output, x)
        # ===================backward====================
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()  # .item(), not the deprecated .data
    # ===================log=========================
    print('epoch [{}/{}], loss:{:.4f}'
          .format(epoch + 1, num_epochs, total_loss))
>>> epoch [95/100], loss:2.0771
epoch [96/100], loss:2.0772
epoch [97/100], loss:2.0769
epoch [98/100], loss:2.0767
epoch [99/100], loss:2.0769
epoch [100/100], loss:2.0766
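A note on comparing those numbers: nn.MSELoss defaults to reduction='mean', so each batch's loss is already a per-element average and does not grow with batch size. The losses "in the thousands" with batch_size = 1 come from total_loss summing one value per batch, i.e. 5152 values per epoch, versus a single value with the full-dataset batch. Dividing by the number of batches makes the runs comparable; a minimal sketch of the log line:

# Per-batch average: comparable across batch sizes, unlike the raw sum above.
avg_loss = total_loss / len(dataloader)
print('epoch [{}/{}], avg loss:{:.4f}'.format(epoch + 1, num_epochs, avg_loss))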