自己搭建的网络运行报错,不知道原因,希望大家指点一下,代码如下:
def conv_block(inputs,
               filters,
               use_bias,
               kernel_size,
               padding='same',
               strides=(1, 1),
               with_conv_short_cut=False):
    """Residual block: two Conv2D+BatchNorm layers plus a shortcut add.

    Args:
        inputs: 4-D feature-map tensor, channels-last (batch, H, W, C).
        filters: number of output filters for both convolutions.
        use_bias: whether the Conv2D layers use a bias vector.
        kernel_size: convolution kernel size, e.g. (3, 3).
        strides: strides of the first convolution. When strides != (1, 1)
            the shortcut must also be convolved to match shapes, so pass
            with_conv_short_cut=True, otherwise the final add() raises a
            shape-mismatch error.
        padding: Conv2D padding mode.
        with_conv_short_cut: if True, project the shortcut through a
            strided Conv2D so its shape matches the conv branch output.

    Returns:
        Tensor: element-wise sum of the shortcut and the conv branch.
    """
    conv1 = Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        activation='relu',
        strides=strides,
        use_bias=use_bias,
        padding=padding,
        kernel_regularizer=regularizers.l2(weight_decay),
    )(inputs)
    # BUG FIX: the model input is channels-last ((28, 28, 1)), so
    # BatchNormalization must normalize the channel axis (-1); the
    # original axis=1 normalized the spatial height axis instead.
    conv1 = BatchNormalization(axis=-1)(conv1)
    conv2 = Conv2D(
        filters=filters,
        kernel_size=kernel_size,
        activation='relu',
        use_bias=use_bias,
        padding=padding,
        kernel_regularizer=regularizers.l2(weight_decay),
    )(conv1)
    conv2 = BatchNormalization(axis=-1)(conv2)
    shortcut = inputs
    if with_conv_short_cut:
        # Project (and possibly down-sample) the shortcut so its shape
        # matches the conv branch before the residual addition.
        shortcut = Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            use_bias=use_bias,
            padding=padding,
            kernel_regularizer=regularizers.l2(weight_decay),
        )(inputs)
    # The original if/else returned the identical expression in both
    # branches; collapsed into a single return.
    return add([shortcut, conv2])
# L2-regularization factor shared by every Conv2D/Dense layer in this model.
# NOTE(review): defined after conv_block but before any call to it, so the
# global lookup succeeds at call time.
weight_decay = 0.001
def resvgg16(input_shape=(28, 28, 1), classes=10):
    """Build a VGG16-style CNN with residual conv blocks.

    Args:
        input_shape: shape of a single input sample, channels-last (H, W, C).
        classes: number of output classes. (The original accepted this
            parameter but hard-coded Dense(10); it is now honored, with the
            same default, so existing calls behave identically.)

    Returns:
        A compiled-ready keras Model named 'resvgg16'.
    """
    X_input = Input(input_shape)
    x = X_input
    # Convolutional block 1
    x = Conv2D(filters=64, kernel_size=(3, 3), strides=(2, 2),
               activation='relu', padding='same',
               kernel_regularizer=regularizers.l2(weight_decay))(x)
    # Channels-last input: normalize the channel axis (-1), not axis 1.
    x = BatchNormalization(axis=-1)(x)
    x = conv_block(x, filters=64, kernel_size=(3, 3), use_bias=True)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = Dropout(0.3)(x)
    # Convolutional block 2
    x = conv_block(x, filters=128, kernel_size=(3, 3), use_bias=True,
                   strides=(2, 2), with_conv_short_cut=True)
    x = conv_block(x, filters=128, kernel_size=(3, 3), use_bias=True)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = Dropout(0.4)(x)
    # Convolutional block 3
    x = conv_block(x, filters=256, kernel_size=(3, 3), use_bias=True,
                   strides=(2, 2), with_conv_short_cut=True)
    x = conv_block(x, filters=256, kernel_size=(3, 3), use_bias=True)
    x = conv_block(x, filters=256, kernel_size=(3, 3), use_bias=True)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = Dropout(0.4)(x)
    # Convolutional block 4
    x = conv_block(x, filters=512, kernel_size=(3, 3), use_bias=True,
                   strides=(2, 2), with_conv_short_cut=True)
    x = conv_block(x, filters=512, kernel_size=(3, 3), use_bias=True)
    x = conv_block(x, filters=512, kernel_size=(3, 3), use_bias=True)
    x = conv_block(x, filters=512, kernel_size=(3, 3), use_bias=True)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = Dropout(0.4)(x)
    # Classifier head
    x = Flatten()(x)
    x = Dense(1000, kernel_regularizer=regularizers.l2(weight_decay),
              activation='relu')(x)
    x = BatchNormalization(axis=1)(x)
    x = Dense(512, kernel_regularizer=regularizers.l2(weight_decay),
              activation='relu')(x)
    x = BatchNormalization(axis=1)(x)
    x = Dense(classes, activation='softmax')(x)
    # BUG FIX (the reported crash): the original passed outputs=X.  'X' is
    # not defined in this function; in the full script it resolved to a
    # NumPy array (e.g. the training data), so Keras raised
    # "AttributeError: 'numpy.ndarray' object has no attribute 'op'" while
    # tracing the graph.  Model outputs must be the symbolic tensor 'x'.
    model = Model(inputs=X_input, outputs=x, name='resvgg16')
    return model
#model=resvgg16()
# Build the model (shows the crash site: Model(...) inside resvgg16).
model = resvgg16(input_shape=(28,28,1),classes=10)
# Compile with categorical cross-entropy; labels must be one-hot encoded.
# NOTE(review): top_k_categorical_accuracy must be imported elsewhere in
# the full script — not visible here.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc',top_k_categorical_accuracy])
# X_train1 / y_train are defined elsewhere in the full script.
history=model.fit(X_train1,y_train,epochs=28,batch_size=128,verbose=2)
model.save('resvgg16.h5')
# Evaluate on the test set (weights were saved above); the third
# positional argument of evaluate() is batch_size.
model.evaluate(X_test1, y_test,128)
报错详细信息:
Traceback (most recent call last):
File "c:/Users/dell/Documents/daima.py/code/tree1/res+vgg16.py", line 487, in <module>
model = resvgg16(input_shape=(28,28,1),classes=10)
File "c:/Users/dell/Documents/daima.py/code/tree1/res+vgg16.py", line 482, in resvgg16
model = Model(inputs=X_input,outputs=X,name='resvgg16')
File "D:\conda\envs\tensorflow2.2\lib\site-packages\tensorflow\python\keras\engine\training.py", line 167, in __init__
super(Model, self).__init__(*args, **kwargs)
File "D:\conda\envs\tensorflow2.2\lib\site-packages\tensorflow\python\keras\engine\network.py", line 173, in __init__
self._init_graph_network(*args, **kwargs)
File "D:\conda\envs\tensorflow2.2\lib\site-packages\tensorflow\python\training\tracking\base.py", line 456, in _method_wrapper
result = method(self, *args, **kwargs)
File "D:\conda\envs\tensorflow2.2\lib\site-packages\tensorflow\python\keras\engine\network.py", line 254, in _init_graph_network
base_layer_utils.create_keras_history(self._nested_outputs)
File "D:\conda\envs\tensorflow2.2\lib\site-packages\tensorflow\python\keras\engine\base_layer_utils.py", line 186, in create_keras_history
_, created_layers = _create_keras_history_helper(tensors, set(), [])
File "D:\conda\envs\tensorflow2.2\lib\site-packages\tensorflow\python\keras\engine\base_layer_utils.py", line 212, in _create_keras_history_helper
op = tensor.op # The Op that created this Tensor.
AttributeError: 'numpy.ndarray' object has no attribute 'op'