class Generator(nn.Module):
    """SRGAN-style generator.

    Pipeline: a shallow 9x9 feature-extraction stage, five residual blocks,
    a post-trunk conv+BN stage joined to the shallow features by a global
    skip connection, then log2(scale_factor) pixel-shuffle upsampling stages
    and a final 9x9 reconstruction convolution.
    """

    def __init__(self, scale_factor):
        super(Generator, self).__init__()
        # Each UpsampleBLock doubles spatial resolution, so we need
        # log2(scale_factor) of them to reach the requested scale
        # (assumes scale_factor is a power of two -- TODO confirm at call site).
        n_upsample = int(math.log(scale_factor, 2))
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=9, padding=4),
            nn.PReLU()
        )
        self.block2 = ResidualBlock(64)
        self.block3 = ResidualBlock(64)
        self.block4 = ResidualBlock(64)
        self.block5 = ResidualBlock(64)
        self.block6 = ResidualBlock(64)
        self.block7 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64)
        )
        tail = [UpsampleBLock(64, 2) for _ in range(n_upsample)]
        tail.append(nn.Conv2d(64, 3, kernel_size=9, padding=4))
        self.block8 = nn.Sequential(*tail)

    def forward(self, x):
        """Map a low-resolution RGB batch to a super-resolved one in [0, 1]."""
        shallow = self.block1(x)
        # Run the residual trunk (blocks 2-7) sequentially.
        features = shallow
        for stage in (self.block2, self.block3, self.block4,
                      self.block5, self.block6, self.block7):
            features = stage(features)
        # Global skip connection around the trunk before upsampling.
        upsampled = self.block8(shallow + features)
        # tanh yields [-1, 1]; shift/scale into the [0, 1] image range.
        return (torch.tanh(upsampled) + 1) / 2
import argparse
import time
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import ToTensor, ToPILImage
from model import Generator, Discriminator
parser = argparse.ArgumentParser(description='Test Single Image')
parser.add_argument('--upscale_factor', default=4, type=int, help='super resolution upscale factor')
parser.add_argument('--test_mode', default='CPU', type=str,
                    help="'GPU' to run on CUDA; anything else runs on CPU")
parser.add_argument('--image_name', type=str, help='test low resolution image name', default='data/Test/target/bee_副本.jpg')
parser.add_argument('--model_name', default='netG_epoch_4_100.pth', type=str, help='generator model epoch name')
opt = parser.parse_args()

UPSCALE_FACTOR = opt.upscale_factor
# BUG FIX: the original compared against 'CPU', so the default ('CPU') took
# the GPU branch and loaded the checkpoint WITHOUT map_location -- that
# crashes on CPU-only machines when the weights were saved from CUDA.
# GPU mode is now opt-in and also guarded by actual CUDA availability.
TEST_MODE = opt.test_mode == 'GPU' and torch.cuda.is_available()
IMAGE_NAME = opt.image_name
MODEL_NAME = opt.model_name

model = Generator(UPSCALE_FACTOR).eval()
if TEST_MODE:
    model.cuda()  # was commented out: GPU mode never actually used the GPU
    model.load_state_dict(torch.load('epochs/' + MODEL_NAME))
else:
    # Remap any CUDA tensors onto the CPU so GPU-trained weights load anywhere.
    model.load_state_dict(torch.load('epochs/' + MODEL_NAME, map_location=lambda storage, loc: storage))

# BUG FIX: force 3-channel RGB. A palette/grayscale/RGBA input would feed the
# wrong channel count (or premultiplied junk) into Conv2d(3, 64, ...) -- a
# common cause of "why does the output image look wrong".
image = Image.open(IMAGE_NAME).convert('RGB')
# Variable is deprecated since PyTorch 0.4; a plain tensor works directly.
image = ToTensor()(image).unsqueeze(0)
if TEST_MODE:
    image = image.cuda()  # input must live on the same device as the model

writer = SummaryWriter('output')
start = time.perf_counter()
with torch.no_grad():  # inference only: skip autograd bookkeeping / memory
    out = model(image)
elapsed = time.perf_counter() - start
print('训练用时' + str(elapsed) + '秒')

out_img = ToPILImage()(out[0].detach().cpu())
out_img_array = np.array(out_img)
# out_img_array = tensor_to_image(out)
print(out_img_array.shape)
writer.add_image("test", out_img_array, 2, dataformats='HWC')
writer.close()
# out_img.show()
# 请问为什么输出的图像是这样的 -- "Why does the output image look like this?"
# (Stray question pasted into the file; commented out so the module parses.
#  Likely culprits: the input image not being converted to RGB before
#  ToTensor, or loading a CUDA checkpoint on the wrong TEST_MODE branch.)