Dwyne_111 2022-09-16 22:24
Views: 13
Closed

Training a model with PyTorch

import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image

captcha_array = list("0123456789abcdefghijklmnopqrstuvwxyz")

def text2vec(x):
    # encode a 4-character label as a (4, 36) one-hot matrix
    # (float, so it can be used directly as a target for MultiLabelSoftMarginLoss)
    zeros = torch.zeros((4, 36), dtype=torch.float32)
    for i in range(len(x)):
        zeros[i, captcha_array.index(x[i])] = 1
    return zeros

def vec2text(x):
    # decode a (4, 36) score matrix back into a 4-character string
    label = ''
    label_tensor = torch.argmax(x, dim=1)
    for i in label_tensor:
        label += captcha_array[i.item()]
    return label

def single_vec_2_text(label_tensor):
    # decode a tensor of 4 class indices into a 4-character string
    label = ''
    for i in label_tensor:
        label += captcha_array[i.item()]
    return label

def batch_vec_2_text(x):
    # decode a (batch, 4, 36) score tensor into a list of 4-character strings
    label_batch = []
    label_all = x.argmax(dim=2)
    for index_image in range(label_all.size(0)):
        label_batch.append(single_vec_2_text(label_all[index_image]))
    return label_batch

def compare_list(x, y):
    # count how many predicted strings match the ground-truth strings
    result_list = []
    for le in range(len(x)):
        if x[le] == y[le]:
            result_list.append(True)
    return result_list.count(True), len(x), result_list.count(True) / len(x)
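
# Sanity-check sketch for the helpers above, assuming 4-character captchas
# over the 36-symbol alphabet; uncomment to verify the
# text -> one-hot -> text round trip:
# sample = "a1b2"
# assert vec2text(text2vec(sample)) == sample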

class MyNet(nn.Module):
    def __init__(self):
        super(MyNet, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, (3, 3), (1, 1), 1),   # in (batch, 1, 60, 160)
            # nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2)                        # out (batch, 32, 30, 80)
        )

        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, (3, 3), (1, 1), 1),  # in (batch, 32, 30, 80)
            # nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2)                        # out (batch, 64, 15, 40)
        )

        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 128, (3, 3), (1, 1), 1),  # in (batch, 64, 15, 40)
            # nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2)                         # out (batch, 128, 7, 20)
        )

        self.layer4 = nn.Sequential(
            nn.Conv2d(128, 256, (3, 3), (1, 1), 1),  # in (batch, 128, 7, 20)
            # nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2)                          # out (batch, 256, 3, 10)
        )

        self.layer5 = nn.Sequential(
            nn.Linear(256 * 3 * 10, 1024),
            nn.Dropout(),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 4 * 36),
        )

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = out.view(out.size(0), -1)  # flatten, keeping the batch dimension
        out = self.layer5(out)
        return out
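
# Shape sanity check for MyNet, assuming grayscale 60x160 inputs as in the
# dataset below: a dummy batch of one image should map to (1, 4 * 36) = (1, 144).
# Uncomment to verify:
# net = MyNet()
# print(net(torch.zeros(1, 1, 60, 160)).shape)  # expected: torch.Size([1, 144])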

class MyData(Dataset):
    def __init__(self, if_train=True):
        super(MyData, self).__init__()
        self.transforms = transforms.Compose(
            [
                transforms.Resize((60, 160)),
                transforms.Grayscale(),
                transforms.ToTensor()
            ]
        )
        if if_train:
            self.root = "./dataset5/train/"
        else:
            self.root = "./dataset5/test/"
        self.files = os.listdir(self.root)  # cache the file list instead of re-listing the directory

    def __len__(self):
        return len(self.files)

    def __getitem__(self, item):
        img_path = self.root + self.files[item]
        img = Image.open(img_path)
        img = self.transforms(img)
        label = self.files[item].split('_')[0]  # the label is the part of the file name before '_'
        label = text2vec(label)
        label = label.view(1, -1)[0]  # critical: flatten the (4, 36) one-hot matrix into a 144-dim target
        return img, label  # img: torch.Size([1, 60, 160])
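
# Dataset sanity check, assuming images named like "<label>_<n>.png" under
# ./dataset5/train/, which is what __getitem__ above expects; uncomment to inspect one sample:
# ds = MyData()
# img, label = ds[0]
# print(img.shape, label.shape)  # expected: torch.Size([1, 60, 160]) torch.Size([144])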

if __name__ == '__main__':
    # start training

    m = MyNet()
    # define the loss function and optimizer
    loss_fn = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(m.parameters(), lr=0.0008)

    total_step = 0
    count = 80000
    count_a = 0

    # build the datasets once and wrap them with PyTorch's DataLoader
    train_data = MyData()
    test_data = MyData(if_train=False)
    train_dataloader = DataLoader(train_data, batch_size=10, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=10, shuffle=False)

    for epoch in range(80000):
        for imgs, targets in train_dataloader:
            count_a += 1
            if count_a < count:
                m.train()
                outputs = m(imgs)

                loss = loss_fn(outputs, targets)
                outputs = outputs.view(-1, 4, 36)
                # print('predicted:', batch_vec_2_text(outputs))
                targets = targets.view(-1, 4, 36)
                # print('actual:', batch_vec_2_text(targets))
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                total_step += 1
                print("step {}, loss: {}".format(total_step, loss.item()),
                      compare_list(batch_vec_2_text(outputs), batch_vec_2_text(targets)))
                with open('log.txt', 'a+') as f:
                    f.write("step {}, loss: {}".format(total_step, loss.item())
                            + str(compare_list(batch_vec_2_text(outputs), batch_vec_2_text(targets))) + "\n")
            else:
                break

            # evaluate on one batch from the test set
            imgs, targets = next(iter(test_dataloader))
            m.eval()
            with torch.no_grad():
                outputs = m(imgs)
                loss = loss_fn(outputs, targets)
            outputs = outputs.view(-1, 4, 36)
            # print('test predicted:', batch_vec_2_text(outputs))
            targets = targets.view(-1, 4, 36)
            # print('test actual:', batch_vec_2_text(targets))
            print("step {}, test loss: {}".format(total_step, loss.item()),
                  compare_list(batch_vec_2_text(outputs), batch_vec_2_text(targets)))

            with open('log1.txt', 'a+') as f:
                f.write("step {}, test loss: {}".format(total_step, loss.item())
                        + str(compare_list(batch_vec_2_text(outputs), batch_vec_2_text(targets))) + "\n")
            if count_a % 15000 == 0:
                torch.save(m, "model.pth")

    torch.save(m, "model.pth")
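
A minimal inference sketch, assuming the classes and helpers above are in scope and that the model was saved to model.pth as in the script:

    import torch
    from torch.utils.data import DataLoader

    # on recent PyTorch versions torch.load may need weights_only=False
    # to unpickle a full model object saved with torch.save(m, "model.pth")
    model = torch.load("model.pth")
    model.eval()

    test_loader = DataLoader(MyData(if_train=False), batch_size=10, shuffle=False)
    with torch.no_grad():
        imgs, targets = next(iter(test_loader))
        outputs = model(imgs).view(-1, 4, 36)
        print("predicted:", batch_vec_2_text(outputs))
        print("actual:", batch_vec_2_text(targets.view(-1, 4, 36)))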