在运行facewap—GAN的FaceSwap_GAN_video_conversion模块时间,出现报错:
Traceback (most recent call last):
File "C:/Users/Lenovo/Desktop/faceswap-GAN-master/FaceSwap_GAN_video_conversion.py", line 13, in <module>
model = FaceswapGANModel(**arch_config)
File "C:\Users\Lenovo\Desktop\faceswap-GAN-master\networks\faceswap_gan_model.py", line 61, in __init__
self.netGA = Model(x, self.decoder_A(self.encoder(x)))
File "E:\AODnet-by-pytorch-master\lib\site-packages\keras\engine\base_layer.py", line 489, in __call__
output = self.call(inputs, **kwargs)
File "E:\AODnet-by-pytorch-master\lib\site-packages\keras\engine\network.py", line 583, in call
output_tensors, _, _ = self.run_internal_graph(inputs, masks)
File "E:\AODnet-by-pytorch-master\lib\site-packages\keras\engine\network.py", line 740, in run_internal_graph
layer.call(computed_tensor, **kwargs))
File "E:\AODnet-by-pytorch-master\lib\site-packages\keras\layers\convolutional.py", line 171, in call
dilation_rate=self.dilation_rate)
File "E:\AODnet-by-pytorch-master\lib\site-packages\keras\backend\tensorflow_backend.py", line 3717, in conv2d
**kwargs)
File "E:\AODnet-by-pytorch-master\lib\site-packages\tensorflow_core\python\ops\nn_ops.py", line 898, in convolution
name=name)
File "E:\AODnet-by-pytorch-master\lib\site-packages\tensorflow_core\python\ops\nn_ops.py", line 976, in convolution_internal
strides = _get_sequence(strides, n, channel_index, "strides")
File "E:\AODnet-by-pytorch-master\lib\site-packages\tensorflow_core\python\ops\nn_ops.py", line 77, in _get_sequence
name, n, n + 2, current_n))
ValueError: strides should be of length 1, 3 or 5 but was 2
进程已结束,退出代码1
相关代码
import keras.backend as K
# Input/Output resolution
RESOLUTION = 64 # 64x64, 128x128, 256x256
assert (RESOLUTION % 64) == 0, "RESOLUTION should be 64, 128, 256"
# Architecture configuration
arch_config = {}
#crd arch_config['IMAGE_SHAPE'] = (RESOLUTION, RESOLUTION, 3)
arch_config['IMAGE_SHAPE'] = (RESOLUTION, RESOLUTION, 3,1)
arch_config['use_self_attn'] = True
arch_config['norm'] = "instancenorm" # instancenorm, batchnorm, layernorm, groupnorm, none
arch_config['model_capacity'] = "standard" # standard, lite
from networks.faceswap_gan_model import FaceswapGANModel
model = FaceswapGANModel(**arch_config)
faceswap_gan_model.py部分:
class FaceswapGANModel():
"""
faceswap-GAN v2.2 model
Attributes:
arch_config: A dictionary that contains architecture configurations (details are described in train notebook).
nc_G_inp: int, number of generator input channels
nc_D_inp: int, number of discriminator input channels
lrG: float, learning rate of the generator
lrD: float, learning rate of the discriminator
"""
def __init__(self, **arch_config):
self.nc_G_inp = 3
self.nc_D_inp = 6
self.IMAGE_SHAPE = arch_config['IMAGE_SHAPE']
self.lrD = 2e-4
self.lrG = 1e-4
self.use_self_attn = arch_config['use_self_attn']
self.norm = arch_config['norm']
self.model_capacity = arch_config['model_capacity']
self.enc_nc_out = 256 if self.model_capacity == "lite" else 512
# define networks
self.encoder = self.build_encoder(nc_in=self.nc_G_inp,
input_size=self.IMAGE_SHAPE[0],
use_self_attn=self.use_self_attn,
norm=self.norm,
model_capacity=self.model_capacity
)
self.decoder_A = self.build_decoder(nc_in=self.enc_nc_out,
input_size=8,
output_size=self.IMAGE_SHAPE[0],
use_self_attn=self.use_self_attn,
norm=self.norm,
model_capacity=self.model_capacity
)
self.decoder_B = self.build_decoder(nc_in=self.enc_nc_out,
input_size=8,
output_size=self.IMAGE_SHAPE[0],
use_self_attn=self.use_self_attn,
norm=self.norm,
model_capacity=self.model_capacity
)
self.netDA = self.build_discriminator(nc_in=self.nc_D_inp,
input_size=self.IMAGE_SHAPE[0],
use_self_attn=self.use_self_attn,
norm=self.norm
)
self.netDB = self.build_discriminator(nc_in=self.nc_D_inp,
input_size=self.IMAGE_SHAPE[0],
use_self_attn=self.use_self_attn,
norm=self.norm
)
x = Input(shape=self.IMAGE_SHAPE) # dummy input tensor
self.netGA = Model(x, self.decoder_A(self.encoder(x)))
尝试寻找报错部分的strides,但并没有找到,请问是什么原因导致的呢