LiuZi_ 2021-05-27 20:56 采纳率: 100%
浏览 349
已采纳

5.27日毕设求问:怎么绘制每个epoch的loss曲线图?最好能附上程序。

import numpy as np
import torch
import torch.utils.data as Data
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import h5py
from SENET import SEBlock
import matplotlib.pyplot as plt


# Open the HDF5 archive of pre-processed bearing vibration data (read-only).
f = h5py.File('all_xdata.h5', 'r')

# Bind each bearing's training data and its target (label) dataset side by side,
# one line per bearing, so the train/target pairing is visible at a glance.
bearing1_1_train, bearing1_1_target = f['bearing1_1_train'], f['bearing1_1_target']
bearing1_2_train, bearing1_2_target = f['bearing1_2_train'], f['bearing1_2_target']
bearing1_3_train, bearing1_3_target = f['bearing1_3_train'], f['bearing1_3_target']
bearing1_4_train, bearing1_4_target = f['bearing1_4_train'], f['bearing1_4_target']
bearing1_5_train, bearing1_5_target = f['bearing1_5_train'], f['bearing1_5_target']
bearing1_6_train, bearing1_6_target = f['bearing1_6_train'], f['bearing1_6_target']
bearing1_7_train, bearing1_7_target = f['bearing1_7_train'], f['bearing1_7_target']
bearing2_1_train, bearing2_1_target = f['bearing2_1_train'], f['bearing2_1_target']
bearing2_2_train, bearing2_2_target = f['bearing2_2_train'], f['bearing2_2_target']
bearing2_3_train, bearing2_3_target = f['bearing2_3_train'], f['bearing2_3_target']
bearing2_4_train, bearing2_4_target = f['bearing2_4_train'], f['bearing2_4_target']
bearing2_5_train, bearing2_5_target = f['bearing2_5_train'], f['bearing2_5_target']
bearing2_6_train, bearing2_6_target = f['bearing2_6_train'], f['bearing2_6_target']
bearing2_7_train, bearing2_7_target = f['bearing2_7_train'], f['bearing2_7_target']
bearing3_1_train, bearing3_1_target = f['bearing3_1_train'], f['bearing3_1_target']
bearing3_2_train, bearing3_2_target = f['bearing3_2_train'], f['bearing3_2_target']
bearing3_3_train, bearing3_3_target = f['bearing3_3_train'], f['bearing3_3_target']



# Training set: 16 bearings are concatenated along axis 0; bearing1_3 is held
# out as the test set. The two part-lists below MUST stay in the same order so
# each training sample lines up with its target.
_train_parts = [bearing2_4_train, bearing1_1_train, bearing1_2_train, bearing3_2_train,
                bearing1_4_train, bearing1_5_train, bearing1_6_train, bearing1_7_train,
                bearing2_1_train, bearing2_2_train, bearing3_3_train, bearing2_6_train,
                bearing2_5_train, bearing2_3_train, bearing2_7_train, bearing3_1_train]
_target_parts = [bearing2_4_target, bearing1_1_target, bearing1_2_target, bearing3_2_target,
                 bearing1_4_target, bearing1_5_target, bearing1_6_target, bearing1_7_target,
                 bearing2_1_target, bearing2_2_target, bearing3_3_target, bearing2_6_target,
                 bearing2_5_target, bearing2_3_target, bearing2_7_target, bearing3_1_target]

train_data = np.concatenate(_train_parts, axis=0)
train_target = np.concatenate(_target_parts, axis=0)

# Held-out test bearing; targets are materialized as a NumPy array up front.
test_data = bearing1_3_train
test_targets = np.array(bearing1_3_target)

#构建网络
#构建网络
class ConvNet(nn.Module):
    """2-layer CNN with squeeze-and-excitation blocks for bearing RUL regression.

    Input: a float tensor of shape (batch, 4, H, 1) — 4 input channels, tall
    thin "image" of vibration features (H is fixed by the dataset so that the
    flattened feature size is 5*32; assumption from the fc1 layer — confirm).
    Output: (batch, 1) values squashed to (0, 1) by a final sigmoid.
    """

    def __init__(self):
        super(ConvNet, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=4, out_channels=64, kernel_size=(10,1), stride=1)
        self.pool1 = nn.MaxPool2d(kernel_size=(10,1), stride=10)
        self.seblock1 = SEBlock(64,r=16)  # channel attention, reduction ratio 16

        self.conv2 = nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(10,1), stride=1)
        self.pool2 = nn.MaxPool2d(kernel_size=(10,1),stride=10)
        self.seblock2 = SEBlock(32, r=8)  # channel attention, reduction ratio 8
        self.fc1 = nn.Linear(5*32, 10)    # flattened conv features -> 10
        self.fc2 = nn.Linear(10, 1)       # 10 -> single regression output

    def forward(self, x):
        # conv -> relu -> SE attention -> pool, twice
        x = self.conv1(x)
        x = F.relu(x)
        x = self.seblock1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = self.seblock2(x)
        x = self.pool2(x)
        # flatten for the fully-connected head
        x = x.view(-1, 5*32)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        # F.sigmoid is deprecated (removed in recent PyTorch); use torch.sigmoid
        x = torch.sigmoid(x)
        return x

net = ConvNet()
Loss = nn.MSELoss()
LR = 0.001  # SGD learning rate
optimizer = optim.SGD(net.parameters(), lr=LR)
BATCH_SIZE = 10
train = torch.tensor(train_data)
targets = torch.tensor(train_target)
train_dataset = Data.TensorDataset(train, targets)
# NOTE(review): shuffle=False preserves bearing order; training usually wants
# shuffle=True — kept as-is, confirm intent.
train_loader = Data.DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=False)

#训练 — record the mean loss of every epoch so it can be plotted afterwards
# (this answers the question: a per-epoch loss curve).
epoch_losses = []
for epoch in range(500):
    running_loss = 0.0
    n_batches = 0
    for step, (batch_train, batch_targets) in enumerate(train_loader):
        batch_train = batch_train.reshape((batch_train.shape[0], 4, -1, 1))
        output = net(batch_train.float())
        # view targets as (batch, 1) to match the network output; if targets
        # are 1-D this avoids MSELoss silently broadcasting to (batch, batch),
        # and it is a no-op when they are already (batch, 1).
        loss = Loss(output, batch_targets.float().view(-1, 1))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # .item() extracts the Python scalar; printing/storing the raw tensor
        # would keep the whole autograd graph alive.
        running_loss += loss.item()
        n_batches += 1
        print('epoch:', epoch, '|step:', step, '|loss:', loss.item())
    epoch_losses.append(running_loss / max(n_batches, 1))

# Plot mean training loss per epoch and save it alongside the script.
plt.figure()
plt.plot(range(len(epoch_losses)), epoch_losses)
plt.xlabel('epoch')
plt.ylabel('mean MSE loss')
plt.title('Training loss per epoch')
plt.savefig('epoch_loss.png')
plt.show()

# Inference on the held-out bearing. torch.no_grad() replaces the deprecated
# Variable(..., requires_grad=False) wrapper.
prediction = []
with torch.no_grad():
    for k in range(test_data.shape[0]):
        data = test_data[k, :, :]
        data = data.reshape((4, -1, 1))
        data = torch.unsqueeze(torch.tensor(data), dim=0).float()
        output = net(data)
        prediction.append(output.numpy())
prediction = np.array(prediction)

  • 写回答

2条回答 默认 最新

  • AlanMax 2021-05-27 22:46
    关注

    看你用了 torch,建议直接用 tensorboardX 记录并查看 loss 曲线:先 `pip install tensorboardX` 安装,再在代码中 `import tensorboardX`,把每个 epoch 的 loss 写入日志即可在 TensorBoard 中看到曲线。

    本回答被题主选为最佳回答 , 对您是否有帮助呢?
    评论
查看更多回答(1条)

报告相同问题?

悬赏问题

  • ¥15 Fluent齿轮搅油
  • ¥15 八爪鱼爬数据为什么自己停了
  • ¥15 交替优化波束形成和ris反射角使保密速率最大化
  • ¥15 树莓派与pix飞控通信
  • ¥15 自动转发微信群信息到另外一个微信群
  • ¥15 outlook无法配置成功
  • ¥30 这是哪个作者做的宝宝起名网站
  • ¥60 版本过低apk如何修改可以兼容新的安卓系统
  • ¥25 由IPR导致的DRIVER_POWER_STATE_FAILURE蓝屏
  • ¥50 有数据,怎么建立模型求影响全要素生产率的因素