李沂阳 · 2022-09-28 22:04 · acceptance rate: 66.7%
95 views
Question closed

DAE-BiLSTM bearing fault diagnosis: unable to feed the trained DAE's low-dimensional features into the BiLSTM, and I don't know how to set the BiLSTM batch size

Problem description and background

I am building a bearing fault diagnosis model based on a DAE (denoising autoencoder) and a BiLSTM. After the DAE is trained, the encoder output is used as the input to a BiLSTM classifier for fault diagnosis. The batch size in the DAE is set to 64, and everything works when the classifier is a fully connected network. However, when I replace the fully connected classifier with a BiLSTM and feed it the encoder features, it fails: the batch size of the imported features defaults to 1, and forcing the shape with torch.view does not help either. The error is: Expected input batch_size (1) to match target batch_size (64). I don't know how to change the batch size seen by the BiLSTM layer.
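
For reference, this is my own minimal reproduction of just the error message (the shapes are my assumptions based on the settings below: batch size 64, 10 fault classes; it is not the real model code):

import torch
import torch.nn as nn

criterion = nn.CrossEntropyLoss()
labels = torch.randint(0, 10, (64,))   # targets for one batch: batch size 64
logits_ok = torch.randn(64, 10)        # classifier output that keeps batch size 64
print(criterion(logits_ok, labels))    # works
logits_bad = torch.randn(1, 10)        # classifier output whose batch collapsed to 1
print(criterion(logits_bad, labels))   # ValueError: Expected input batch_size (1)
                                       # to match target batch_size (64).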

This is my DAE code; I am using the Dae1d model together with BiLSTM1d.

from __future__ import print_function
import torch.nn as nn
import torch
from BiLSTM1d import *
inputflag = 0
# ---------------------------------- input size == 64*1*512 ---------------------- 64 = batch size, 1 = channels, 512 = feature length; the fully connected layers act directly on the last dimension (512), since the channel count here is 1
class encoder(nn.Module):
    def __init__(self, in_channel=1, out_channel=10):
        super(encoder, self).__init__()
        self.in_channel = in_channel
        # Encoder
        self.fc1 = nn.Sequential(
            nn.Linear(512, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True))

        self.fc2 = nn.Sequential(
            nn.Linear(1024, 1024),
            nn.BatchNorm1d(1024),
            nn.ReLU(inplace=True))

        self.fc3 = nn.Sequential(
            nn.Linear(1024, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True))

        self.fc4 = nn.Linear(64, 16)
        self.relu = nn.ReLU()

    def forward(self, x):
        global inputflag
        #64*1*512
#         x=torch.unsqueeze(x, dim=2)
        if x.shape[2] == 512:
            noise = torch.rand(x.shape).cuda() * x.mean() / 10
            x = x + noise
            out = x.view(x.size(0), -1)
#             out=torch.unsqueeze(out, dim=2)
            inputflag = 0
            out = self.fc1(out)
        else:
            inputflag = 1
            noise = torch.rand(x.shape).cuda() * x.mean() / 10
            x = x + noise
            out = x.view(x.size(0), -1)
        
        out = self.fc2(out)
        
        out = self.fc3(out)
        
        out = self.fc4(out)
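        # NOTE: out is 2-D here, with shape (batch_size, 16); this is exactly the
        # tensor the classifier receives as its input z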
        
        return out

class decoder(nn.Module):
    def __init__(self, in_channel=1, out_channel=10):
        super(decoder, self).__init__()

        self.fc1 = nn.Sequential(
            nn.Linear(16, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True))

        self.fc2 = nn.Sequential(
            nn.Linear(64, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(inplace=True))

        self.relu = nn.ReLU()
        self.fc3 = nn.Linear(256, 1024)
        self.fc4 = nn.Linear(1024, 512)

    def forward(self, z):
        out = self.fc1(z)
        
        out = self.fc2(out)
        
        if inputflag ==1:
            out = self.fc3(out)
        else:
            out = self.relu(self.fc3(out))
            out = self.fc4(out)
        
        
        return out


class classifier(nn.Module):
    def __init__(self, in_channel=1, out_channel=10):
        super(classifier, self).__init__()
        
#         self.fc6 = nn.Sequential(nn.ReLU(),nn.Linear(16, out_channel))
        self.fc6 = BiLSTM(64)
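        # NOTE: BiLSTM's first argument is in_channel (the Conv1d input channels),
        # not the batch size, so BiLSTM(64) sets in_channel=64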
    def forward(self, z):
        print(z.shape)
#         z = out.view(-1,out.size(0),out.size(1))
        label = self.fc6(z)
        return label
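
When I write the shapes out by hand (this snippet is only my illustration, assuming the encoder output really is 64 x 16), the problem seems to be that the encoder returns a 2-D tensor while the Conv1d layers inside BiLSTM expect a 3-D (batch, channel, length) tensor:

import torch

z = torch.randn(64, 16)           # what encoder.forward() returns: (batch, features)
print(z.unsqueeze(1).shape)       # torch.Size([64, 1, 16])  batch stays 64, 1 channel
print(z.view(-1, 64, 16).shape)   # torch.Size([1, 64, 16])  batch collapses to 1
# the commented-out reshape in classifier.forward() produces the second shape,
# which I suspect is where the batch size of 1 comes from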



This is my BiLSTM code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from attention_layer import AttentionLayer
# For the Conv1d layers the first argument is the input channels and the second the output channels. Layers that change the channel count: the conv layers, the first fully connected layer, and the last fully connected (softmax) layer.

# The flatten, pooling and dropout layers do not change the channel count.
class BiLSTM(nn.Module):
    def __init__(self, in_channel=1, out_channel=10):
        super(BiLSTM, self).__init__()
        self.hidden_dim = 64
        self.kernel_num = 16
        self.num_layers = 2
        self.V = 25
        self.embed1 = nn.Sequential(
            nn.Conv1d(in_channel, self.kernel_num, kernel_size=3, padding=1),
            nn.BatchNorm1d(self.kernel_num),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=2, stride=2))
        self.embed2 = nn.Sequential(
            nn.Conv1d(self.kernel_num, 8, kernel_size=3, padding=1),
            nn.BatchNorm1d(8),
            nn.ReLU(inplace=True),
            nn.AdaptiveMaxPool1d(self.V))
        self.hidden2label1 = nn.Sequential(nn.Linear(self.V * 2 * self.hidden_dim, self.hidden_dim * 4), nn.ReLU(), nn.Dropout())
        self.hidden2label2 = nn.Linear(self.hidden_dim * 4, out_channel)
        self.bilstm = nn.LSTM(self.kernel_num*2, self.hidden_dim,
                              num_layers=self.num_layers, bidirectional=True,
                              batch_first=True, bias=False)

    def forward(self, x):
        x = self.embed1(x)
        x = self.embed2(x)
        x = x.view(-1, self.kernel_num*2, self.V)
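        # NOTE: the -1 makes the batch dimension whatever is left over; if embed2
        # does not actually output kernel_num*2 channels, the batch size silently
        # changes at this line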
        x = torch.transpose(x, 1, 2)
        bilstm_out, _ = self.bilstm(x)
        bilstm_out = torch.tanh(bilstm_out)
        bilstm_out = bilstm_out.view(bilstm_out.size(0), -1)
        logit = self.hidden2label1(bilstm_out)
        logit = self.hidden2label2(logit)

        return logit
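
To see where the batch dimension changes, a quick shape check like this can be appended at the bottom of BiLSTM1d.py (the dummy sizes are my own guesses, not the real encoder features):

if __name__ == '__main__':
    # feed a fake batch of encoder features through BiLSTM and print the shapes
    net = BiLSTM(in_channel=1, out_channel=10)
    x = torch.randn(64, 1, 16)         # batch 64, 1 channel, 16 features per sample
    print('input :', x.shape)
    print('output:', net(x).shape)     # if the first dimension is no longer 64 here,
                                       # the view(-1, ...) in forward() is changing it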



This is my AE training/configuration code, which handles data loading, CUDA setup, and so on:

#!/usr/bin/python
# -*- coding:utf-8 -*-

import logging
import os
import time
import warnings
import torch
from torch import nn
from torch import optim
import models
import AE_Datasets
import torch.nn.functional as F


def SAEloss(recon_x, x, z):
    """
    recon_x: generating images
    x: origin images
    mu: latent mean
    logvar: latent log variance
    """
    reconstruction_function = nn.MSELoss()  # mse loss
    BCE = reconstruction_function(recon_x, x)
    pmean = 0.5
    p = F.sigmoid(z)
    p = torch.mean(p, 1)
    KLD = pmean * torch.log(pmean / p) + (1 - pmean) * torch.log((1 - pmean) / (1 - p))
    KLD = torch.sum(KLD, 0)
    return BCE + KLD


class train_utils(object):
    def __init__(self, args, save_dir):
        self.args = args
        self.save_dir = save_dir

    def setup(self):
        """
        Initialize the datasets, model, loss and optimizer
        :param args:
        :return:
        """
        args = self.args

        # Consider the gpu or cpu condition
        if torch.cuda.is_available():
            self.device = torch.device("cuda")
            self.device_count = torch.cuda.device_count()
            logging.info('using {} gpus'.format(self.device_count))
            assert args.batch_size % self.device_count == 0, "batch size should be divided by device count"
        else:
            warnings.warn("gpu is not available")
            self.device = torch.device("cpu")
            self.device_count = 1
            logging.info('using {} cpu'.format(self.device_count))


        # Load the datasets
        if args.processing_type == 'O_A':
            from AE_Datasets.O_A import datasets
            Dataset = getattr(datasets, args.data_name)
        elif args.processing_type == 'R_A':
            from AE_Datasets.R_A import datasets
            Dataset = getattr(datasets, args.data_name)
        elif args.processing_type == 'R_NA':
            from AE_Datasets.R_NA import datasets
            Dataset = getattr(datasets, args.data_name)
        else:
            raise Exception("processing type not implement")


        self.datasets = {}

        self.datasets['train'], self.datasets['val'] = Dataset(args.data_dir, args.normlizetype).data_preprare()

        self.dataloaders = {x: torch.utils.data.DataLoader(self.datasets[x], batch_size=args.batch_size,
                                                           shuffle=(True if x == 'train' else False),
                                                           num_workers=args.num_workers,
                                                           pin_memory=(True if self.device == 'cuda' else False))
                            for x in ['train', 'val']}
        # Define the model
        fmodel=getattr(models, args.model_name)
        self.encoder = getattr(fmodel, 'encoder')(in_channel=Dataset.inputchannel, out_channel=Dataset.num_classes)

        self.decoder = getattr(fmodel, 'decoder')(in_channel=Dataset.inputchannel,
                                                                   out_channel=Dataset.num_classes)
        self.classifier = getattr(fmodel, 'classifier')(in_channel=Dataset.inputchannel,
                                                                   out_channel=Dataset.num_classes)

        # Define the optimizer
        if args.opt == 'sgd':
            self.optimizer = optim.SGD([{'params': self.encoder.parameters()}, {'params': self.decoder.parameters()}],
                                       lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        elif args.opt == 'adam':
            self.optimizer = optim.Adam([{'params': self.encoder.parameters()}, {'params': self.decoder.parameters()}],
                                        lr=args.lr, weight_decay=args.weight_decay)
        else:
            raise Exception("optimizer not implement")

        # Define the learning rate decay
        if args.lr_scheduler == 'step':
            steps = [int(step) for step in args.steps.split(',')]
            self.lr_scheduler = optim.lr_scheduler.MultiStepLR(self.optimizer, steps, gamma=args.gamma)
        elif args.lr_scheduler == 'exp':
            self.lr_scheduler = optim.lr_scheduler.ExponentialLR(self.optimizer, args.gamma)
        elif args.lr_scheduler == 'stepLR':
            steps = int(args.steps)
            self.lr_scheduler = optim.lr_scheduler.StepLR(self.optimizer, steps, args.gamma)
        elif args.lr_scheduler == 'fix':
            self.lr_scheduler = None
        else:
            raise Exception("lr schedule not implement")

        # Define the optimizer
        if args.opt == 'sgd':
            self.optimizer1 = optim.SGD([{'params': self.encoder.parameters()}, {'params': self.classifier.parameters()}],
                                        lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        elif args.opt == 'adam':
            self.optimizer1 = optim.Adam([{'params': self.encoder.parameters()}, {'params': self.classifier.parameters()}],
                                         lr=args.lr, weight_decay=args.weight_decay)
        else:
            raise Exception("optimizer not implement")

        # Define the learning rate decay
        if args.lr_scheduler == 'step':
            steps1 = [int(step) for step in args.steps1.split(',')]
            self.lr_scheduler1 = optim.lr_scheduler.MultiStepLR(self.optimizer1, steps1, gamma=args.gamma)
        elif args.lr_scheduler == 'exp':
            self.lr_scheduler1 = optim.lr_scheduler.ExponentialLR(self.optimizer1, args.gamma)
        elif args.lr_scheduler == 'stepLR':
            steps1 = int(args.steps1)
            self.lr_scheduler1 = optim.lr_scheduler.StepLR(self.optimizer1, steps1, args.gamma)
        elif args.lr_scheduler == 'fix':
            self.lr_scheduler1 = None
        else:
            raise Exception("lr schedule not implement")

        self.start_epoch = 0
        # Move the models to the device and define the losses
        self.encoder.to(self.device)
        self.decoder.to(self.device)
        self.classifier.to(self.device)
        self.criterion = nn.CrossEntropyLoss()
        self.criterion1 = nn.MSELoss()


    def train(self):
        """
        Training process
        :return:
        """
        args = self.args

        step = 0
        best_acc = 0.0
        batch_count = 0
        batch_loss = 0.0
        batch_acc = 0
        step_start = time.time()

        training_acc = []
        testing_acc = []

        training_loss = []
        testing_loss = []

        print("Training Autoencoder with minimum loss")
        for epoch in range(args.middle_epoch):

            logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.middle_epoch - 1) + '-'*5)
            # Update the learning rate
            if self.lr_scheduler is not None:
                # self.lr_scheduler.step(epoch)
                logging.info('current lr: {}'.format(self.lr_scheduler.get_lr()))
            else:
                logging.info('current lr: {}'.format(args.lr))

            # Each epoch has a training and val phase
            for phase in ['train', 'val']:
                # Define the temp variable
                epoch_start = time.time()
                epoch_loss = 0.0

                # Set model to train mode or test mode
                if phase == 'train':
                    self.encoder.train()
                    self.decoder.train()
                else:
                    self.encoder.eval()
                    self.decoder.eval()

                for batch_idx, (inputs, labels) in enumerate(self.dataloaders[phase]):
                    inputs = inputs.to(self.device)

                    # Forward pass; gradients are only tracked in the train phase
                    with torch.set_grad_enabled(phase == 'train'):
                        # forward
                        if args.model_name in ["Vae1d", "Vae2d"]:
                            mu, logvar = self.encoder(inputs)
                            recx = self.decoder(mu, logvar)
                            loss = self.criterion1(recx, inputs)
                        elif args.model_name in ["Sae1d", "Sae2d"]:
                            z = self.encoder(inputs)
                            recx = self.decoder(z)
                            loss = SAEloss(recx, inputs, z)
                        elif args.model_name in ["Ae1d", "Ae2d", "Dae1d", "Dae2d"]:
                            z = self.encoder(inputs)
                            recx = self.decoder(z)
                            loss = self.criterion1(recx, inputs)

                        loss_temp = loss.item() * inputs.size(0)
                        epoch_loss += loss_temp

                        # Calculate the training information
                        if phase == 'train':
                            # backward
                            self.optimizer.zero_grad()
                            loss.backward()
                            self.optimizer.step()

                            batch_loss += loss_temp
                            batch_count += inputs.size(0)

                            # Print the training information
                            if step % args.print_step == 0:
                                batch_loss = batch_loss / batch_count
                                temp_time = time.time()
                                train_time = temp_time - step_start
                                step_start = temp_time
                                batch_time = train_time / args.print_step if step != 0 else train_time
                                sample_per_sec = 1.0*batch_count/train_time
                                logging.info('Epoch: {} [{}/{}], Train Loss: {:.4f}'
                                             '{:.1f} examples/sec {:.2f} sec/batch'.format(
                                    epoch, batch_idx*len(inputs), len(self.dataloaders[phase].dataset),
                                    batch_loss, sample_per_sec, batch_time
                                ))
                                batch_loss = 0.0
                                batch_count = 0
                            step += 1


                # Print the train and val information via each epoch
                epoch_loss = epoch_loss / len(self.dataloaders[phase].dataset)
                logging.info('Epoch: {} {}-Loss: {:.4f}, Cost {:.4f} sec'.format(
                    epoch, phase, epoch_loss, time.time()-epoch_start
                ))

            if self.lr_scheduler is not None:
                self.lr_scheduler.step()

        for epoch1 in range(self.start_epoch, args.max_epoch):
            logging.info('-' * 5 + 'Epoch {}/{}'.format(epoch1, args.max_epoch - 1) + '-' * 5)
            # Update the learning rate
            if self.lr_scheduler1 is not None:
                # self.lr_scheduler1.step(epoch1)
                logging.info('current lr: {}'.format(self.lr_scheduler1.get_lr()))
            else:
                logging.info('current lr: {}'.format(args.lr))

            # Each epoch has a training and val phase
            for phase in ['train', 'val']:
                # Define the temp variable
                epoch_start = time.time()
                epoch_acc = 0
                epoch_loss = 0.0

                # Set model to train mode or test mode
                if phase == 'train':
                    self.encoder.train()
                    self.classifier.train()
                else:
                    self.encoder.eval()
                    self.classifier.eval()
                for batch_idx, (inputs, labels) in enumerate(self.dataloaders[phase]):
                    inputs = inputs.to(self.device)
                    labels = labels.to(self.device)

                    # Forward pass; gradients are only tracked in the train phase
                    with torch.set_grad_enabled(phase == 'train'):
                        # forward
                        if args.model_name in ["Vae1d", "Vae2d"]:
                            mu, logvar = self.encoder(inputs)
                            logits = self.classifier(mu, logvar)
                            loss = self.criterion(logits, labels)
                        elif args.model_name in ["Sae1d", "Sae2d"]:
                            z = self.encoder(inputs)
                            logits = self.classifier(z)
                            loss = self.criterion(logits, labels)
                        elif args.model_name in ["Ae1d", "Ae2d", "Dae1d", "Dae2d"]:
                            z = self.encoder(inputs)
                            logits = self.classifier(z)
                            loss = self.criterion(logits, labels)

                        pred = logits.argmax(dim=1)
                        correct = torch.eq(pred, labels).float().sum().item()
                        loss_temp = loss.item() * inputs.size(0)
                        epoch_loss += loss_temp
                        epoch_acc += correct

                        # Calculate the training information
                        if phase == 'train':
                            # backward
                            self.optimizer1.zero_grad()
                            loss.backward()
                            self.optimizer1.step()

                            batch_loss += loss_temp
                            batch_acc += correct
                            batch_count += inputs.size(0)

                            # Print the training information
                            if step % args.print_step == 0:
                                batch_loss = batch_loss / batch_count
                                batch_acc = batch_acc / batch_count
                                temp_time = time.time()
                                train_time = temp_time - step_start
                                step_start = temp_time
                                batch_time = train_time / args.print_step if step != 0 else train_time
                                sample_per_sec = 1.0 * batch_count / train_time
                                logging.info('Epoch: {} [{}/{}], Train Loss: {:.4f} Train Acc: {:.4f},'
                                             '{:.1f} examples/sec {:.2f} sec/batch'.format(
                                    epoch1, batch_idx * len(inputs), len(self.dataloaders[phase].dataset),
                                    batch_loss, batch_acc, sample_per_sec, batch_time
                                ))
                                batch_acc = 0
                                batch_loss = 0.0
                                batch_count = 0
                            step += 1

                # Print the train and val information via each epoch
                epoch_loss = epoch_loss / len(self.dataloaders[phase].dataset)
                epoch_acc = epoch_acc / len(self.dataloaders[phase].dataset)

                if phase == "train":
                    traing_acc.append(epoch_acc)
                    traing_loss.append(epoch_loss)
                else:
                    testing_acc.append(epoch_acc)
                    testing_loss.append(epoch_loss)
                logging.info('Epoch: {} {}-Loss: {:.4f} {}-Acc: {:.4f}, Cost {:.4f} sec'.format(
                    epoch1, phase, epoch_loss, phase, epoch_acc, time.time() - epoch_start
                ))

                # save the model
                if phase == 'val':
                    # save the checkpoint for other learning
                    model_state_dic = self.classifier.module.state_dict() if self.device_count > 1 else self.classifier.state_dict()
                    # save the best model according to the val accuracy
                    if epoch_acc > best_acc or epoch1 > args.max_epoch-2:
                        best_acc = epoch_acc
                        logging.info("save best model epoch {}, acc {:.4f}".format(epoch1, epoch_acc))
                        torch.save(model_state_dic,
                                   os.path.join(self.save_dir, '{}-{:.4f}-best_model.pth'.format(epoch1, best_acc)))


            if self.lr_scheduler1 is not None:
                self.lr_scheduler1.step()
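
As far as I can tell, the batch size is only set once, on the DataLoader, so I assume the BiLSTM classifier should not need a batch-size setting of its own; the batch dimension should simply be carried along from the encoder output. This is a stand-in sketch of that flow with dummy modules (the Linear stubs are mine, not the real networks):

import torch
import torch.nn as nn

# one fake batch, same sizes as my DataLoader delivers (batch_size=64)
inputs = torch.randn(64, 1, 512)
labels = torch.randint(0, 10, (64,))

# shape-only stand-ins for the encoder and the classifier
encoder_stub = nn.Sequential(nn.Flatten(), nn.Linear(512, 16))
classifier_stub = nn.Linear(16, 10)

z = encoder_stub(inputs)                      # (64, 16)  batch dimension survives
logits = classifier_stub(z)                   # (64, 10)
loss = nn.CrossEntropyLoss()(logits, labels)  # batch sizes match, no error
print(z.shape, logits.shape, loss.item())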


This is the argument-parsing code:

#!/usr/bin/python
# -*- coding:utf-8 -*-

import argparse
import os
from datetime import datetime
from utils.logger import setlogger
import logging
from utils.train_utils_ae import train_utils


args = None

def parse_args():
    parser = argparse.ArgumentParser(description='Train')

    # basic parameters
    parser.add_argument('--model_name', type=str, default='Dae1d', help='the name of the model')
    parser.add_argument('--data_name', type=str, default='CWRUFFT', help='the name of the data')
    parser.add_argument('--data_dir', type=str, default= r"E:\yiyang study\xuexi\yanjiusheng fangxiang\code\DL-based-Intelligent-Diagnosis-Benchmark-master\cwrudata", help='the directory of the data')
    parser.add_argument('--normlizetype', type=str, choices=['0-1', '1-1', 'mean-std'],default="1-1", help='data pre-processing ')
    parser.add_argument('--processing_type', type=str, choices=['R_A', 'R_NA', 'O_A'], default='R_A',
                        help='R_A: random split with data augmentation, R_NA: random split without data augmentation, O_A: order split with data augmentation')
    parser.add_argument('--cuda_device', type=str, default='0', help='assign device')
    parser.add_argument('--checkpoint_dir', type=str, default='./checkpoint', help='the directory to save the model')
    parser.add_argument("--pretrained", type=bool, default=True, help='whether to load the pretrained model')
    parser.add_argument('--batch_size', type=int, default=64, help='batchsize of the training process')
    parser.add_argument('--num_workers', type=int, default=0, help='the number of worker processes for data loading')

    # optimization information
    parser.add_argument('--opt', type=str, choices=['sgd', 'adam'], default='adam', help='the optimizer')
    parser.add_argument('--lr', type=float, default=0.001, help='the initial learning rate')
    parser.add_argument('--momentum', type=float, default=0.9, help='the momentum for sgd')
    parser.add_argument('--weight_decay', type=float, default=1e-5, help='the weight decay')
    parser.add_argument('--lr_scheduler', type=str, choices=['step', 'exp', 'stepLR', 'fix'], default='fix', help='the learning rate schedule')
    parser.add_argument('--gamma', type=float, default=0.1, help='learning rate scheduler parameter for step and exp')
    parser.add_argument('--steps', type=str, default='10,20,30,40', help='the learning rate decay for step and stepLR')
    parser.add_argument('--steps1', type=str, default='50,80',
                        help='the learning rate decay for step and stepLR')


    # save, load and display information
    parser.add_argument('--middle_epoch', type=int, default=50, help='number of epochs for autoencoder pre-training')
    parser.add_argument('--max_epoch', type=int, default=100, help='max number of epochs')
    parser.add_argument('--print_step', type=int, default=100, help='the interval of log training information')
    args = parser.parse_args()
    return args


if __name__ == '__main__':

    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_device.strip()
    # Prepare the saving path for the model
    sub_dir = args.model_name+'_'+args.data_name + '_' + datetime.strftime(datetime.now(), '%m%d-%H%M%S')
    save_dir = os.path.join(args.checkpoint_dir, sub_dir)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # set the logger
    setlogger(os.path.join(save_dir, 'training.log'))

    # save the args
    for k, v in args.__dict__.items():
        logging.info("{}: {}".format(k, v))

    trainer = train_utils(args, save_dir)
    trainer.setup()
    trainer.train()






