keras搭建网络模型时遇到Model的问题:AttributeError: 'NoneType' object has no attribute '_inbound_nodes',网上找了很多解决方法,基本是说在keras中必须是layer的形式,不能有函数的存在。但是我把所有程序过了几遍,能够用Lambda封装的都封装了,还是无法解决。求助求助!
import tensorflow as tf
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.layers import *
from nets.MCRmobilenetv2 import mobilenetV2
from nets.Xception import Xception
import torch
import torch.nn as nn
from T_attention import se_block, cbam, eca_block, coordinate
num_classes = 2
backbone = "mobilenet"
downsample_factor = 16
aux_branch = False
input_shape = [512, 512]
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
# 计算padding的数量,hw是否需要收缩
if stride == 1:
depth_padding = 'same'
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
depth_padding = 'valid'
# 如果需要激活函数
if not depth_activation:
x = Activation('relu')(x)
# 分离卷积,首先3x3分离卷积,再1x1卷积
# 3x3采用膨胀卷积
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
# 1x1卷积,进行压缩
x = Conv2D(filters, (1, 1), padding='same',
use_bias=False, name=prefix + '_pointwise')(x)
x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation('relu')(x)
return x
def MCRNetmbil(input_shape, alpha=1., downsample_factor=16):
img_input = Input(shape=input_shape)
# 输入是 512 x 512 x 3
backbone = "mobilenet"
if backbone == "mobilenet":
# ----------------------------------#
# 获得四个特征层
# skip1 shape=(?, 128, 128, 24); skip2 shape=(?, 64, 64, 32); skip3 shape=(?, 32, 32, 64); skip4 shape=(?, 32, 32, 320)
# ----------------------------------#
skip1, skip2, skip3, skip4, atrous_rates = mobilenetV2(img_input, alpha, downsample_factor=downsample_factor)
else:
raise ValueError('Unsupported backbone - `{}`, Use mobilenet, xception.'.format(backbone))
# 低级特征
Y1 = Conv2D(512, (3, 3), padding='same', use_bias=False, name='Y1')(skip4)
Y1 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(Y1)
Y1 = Activation('relu')(Y1)
# Y1 32x32x512
Y1 = GlobalAveragePooling2D()(Y1)
Y1 = Lambda(lambda x: K.expand_dims(x, 1))(Y1)
Y1 = Lambda(lambda x: K.expand_dims(x, 1))(Y1)
Y1 = Lambda(lambda x: tf.image.resize_images(x, tf.keras.backend.int_shape(skip4)[1:3], align_corners=True))(Y1)
# Y_2 = Concatenate()([skip4, Y1])
Y_2 = Lambda(lambda x: K.concatenate([x, Y1], axis=3))(skip4)
Y2 = SepConv_BN(Y_2, 256, 'Y2_0',
rate=2, depth_activation=True, epsilon=1e-5)
Y2 = SepConv_BN(Y2, 256, 'Y2_1',
rate=3, depth_activation=True, epsilon=1e-5)
# Y2 shape=(?, 32, 32, 256)
# Y_3 = Concatenate()([skip4, Y2])
Y_3 = Lambda(lambda x: K.concatenate([x, Y2], axis=3))(skip4)
Y3 = SepConv_BN(Y_3, 256, 'Y3_0',
rate=2, depth_activation=True, epsilon=1e-5)
Y3 = SepConv_BN(Y3, 256, 'Y3_1',
rate=3, depth_activation=True, epsilon=1e-5)
# Y3 shape=(?, 32, 32, 256)
# Y_4 = Concatenate()([skip4, Y3])
Y_4 = Lambda(lambda x: K.concatenate([x, Y3], axis=3))(skip4)
Y4 = SepConv_BN(Y_4, 256, 'Y4_0',
rate=2, depth_activation=True, epsilon=1e-5)
Y4 = SepConv_BN(Y4, 256, 'Y4_1',
rate=3, depth_activation=True, epsilon=1e-5)
# Y4 shape=(?, 32, 32, 256)
# Y_5 = Concatenate()([skip4, Y4])
Y_5 = Lambda(lambda x: K.concatenate([x, Y4], axis=3))(skip4)
Y5 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='Y_5')(Y_5)
Y5 = BatchNormalization(name='Y_5_BN', epsilon=1e-5)(Y5)
Y5 = Activation('relu')(Y5)
# Y5 shape=(?, 32, 32, 256)
Z_4 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='Z_4')(Y4 + Y5)
Z4 = BatchNormalization(name='Z_4_BN', epsilon=1e-5)(Z_4)
Z4 = Activation('relu')(Z4)
# Z4 shape=(?, 32, 32, 256)
Z_3 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='Z_3')(Z4 + Y3)
Z3 = BatchNormalization(name='Z_3_BN', epsilon=1e-5)(Z_3)
Z3 = Activation('relu')(Z3)
# Z3 shape=(?, 32, 32, 256)
Z_2 = Conv2D(512, (1, 1), padding='same', use_bias=False, name='Z_2')(Z3 + Y2)
Z2 = BatchNormalization(name='Z_2_BN', epsilon=1e-5)(Z_2)
Z2 = Activation('relu')(Z2)
# Z2 shape=(?, 32, 32, 512)
Z_1 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='Z_1')(Z2 + Y1)
Z1 = BatchNormalization(name='Z_1_BN', epsilon=1e-5)(Z_1)
Z1 = Activation('relu')(Z1)
# Z1 shape=(?, 32, 32, 256)
Z = Concatenate()([Z1, Z2, Z3, Z4, Y5])
Z = Conv2D(256, (1, 1), padding='same', use_bias=False, name='concat_Z')(Z)
Z = BatchNormalization(name='concat_Z_BN', epsilon=1e-5)(Z)
Z = Activation('relu')(Z)
Z = Dropout(0.1)(Z)
# Z shape=(?, 32, 32, 256)
skip4 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='Z_2')(skip4)
Z = Conv2D(256, (1, 1), padding='same', use_bias=False, name='Z_1')(Z + skip4)
Z = BatchNormalization(name='concat_Z_BN', epsilon=1e-5)(Z)
OLCCA = Activation('relu')(Z)
# OLCCA shape=(?, 32, 32, 256)
# 高级特征
y3 = Lambda(lambda xx: HRC(xx, OLCCA))(skip3)
# y3 = Concatenate()([y3, OLCCA])
y3 = Lambda(lambda x: K.concatenate([x, OLCCA], axis=3))(y3)
# skip2_size = tf.keras.backend.int_shape(skip2)
y3_1 = Lambda(lambda xx: tf.image.resize_images(xx, tf.keras.backend.int_shape(skip2)[1:3], align_corners=True))(y3)
y2 = Lambda(lambda xx: HRC(xx, y3_1))(skip2)
# y2 = Concatenate()([y2, y3_1])
y2 = Lambda(lambda x: K.concatenate([x, y3_1], axis=3))(y2)
# skip1_size = tf.keras.backend.int_shape(skip1)
y2_1 = Lambda(lambda xx: tf.image.resize_images(xx, tf.keras.backend.int_shape(skip1)[1:3], align_corners=True))(y2)
y1 = Lambda(lambda xx: HRC(xx, y2_1))(skip1)
# y1 = Concatenate()([y1, y2_1])
y1 = Lambda(lambda x: K.concatenate([x, y2_1], axis=3))(y1)
y3_2 = Lambda(lambda xx: tf.image.resize_images(xx, tf.keras.backend.int_shape(skip1)[1:3], align_corners=True))(y3_1)
y = Concatenate()([y1, y2_1, y3_2])
OLCCA_1 = Lambda(lambda xx: tf.image.resize_images(xx, tf.keras.backend.int_shape(skip1)[1:3], align_corners=True))(OLCCA)
# -----堆叠最终结果--------- #
x = Concatenate()([y, OLCCA_1])
x = SepConv_BN(x, 256, 'decoder_conv0',
depth_activation=True, epsilon=1e-5)
x = SepConv_BN(x, 256, 'decoder_conv1',
depth_activation=True, epsilon=1e-5)
# x : shape=(?, 128, 128, 256)
# -----------------------------------------#
# 获得每个像素点的分类
# -----------------------------------------#
# 512,512
# size_before3 = tf.keras.backend.int_shape(img_input)
# 512,512,21
x = Conv2D(num_classes, (1, 1), padding='same')(x)
x = Lambda(lambda xx: tf.image.resize_images(xx, tf.keras.backend.int_shape(img_input)[1:3], align_corners=True))(x)
x = Softmax()(x)
model = Model(img_input, x, name='mcrnet')
return model
def CFE(input):
x1 = Conv2D(256, (3, 3), padding='same', use_bias=False, name='Conv')(input)
x1 = BatchNormalization(name='Conv_BN', epsilon=1e-5)(x1)
x1 = Activation('relu')(x1)
x2 = DepthwiseConv2D((3, 3), strides=(1, 1), dilation_rate=(1, 1),
padding='same', use_bias=False, name='DWConv')(input)
x2 = BatchNormalization(name='DWConv_BN', epsilon=1e-5)(x2)
x2 = Activation('relu')(x2)
end = Lambda(lambda x: K.concatenate([x1, x], axis=3))(x2)
end = Conv2D(256, (1, 1), padding='same', use_bias=False, name='concat')(end)
end = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(end)
end_1 = Activation('relu')(end)
# end_1: shape=(?, 32, 32, 256)
y = GlobalAveragePooling2D()(end_1)
# # shape=(?, 256)
y = Lambda(lambda x: K.expand_dims(x, 1))(y)
y = Lambda(lambda x: K.expand_dims(x, 1))(y)
# # shape=(?, 1, 1, 256)
y = Lambda(lambda x: tf.image.resize_images(x, tf.keras.backend.int_shape(end_1)[1:3], align_corners=True))(y)
y = Conv2D(tf.keras.backend.int_shape(end_1)[3], (1, 1), padding='same', use_bias=False, name='conv_y')(y)
y = BatchNormalization(name='Conv_yBN', epsilon=1e-5)(y)
y = Activation('relu')(y)
# end_2 = Lambda(lambda x: x * y)(end_1)
end_2 = Lambda(lambda xx: Multiply()([xx, y]))(end_1)
# end_2 = end_1 * y
input = Conv2D(tf.keras.backend.int_shape(end_1)[3], (1, 1), padding='same', use_bias=False, name='conv_y')(input)
# end = Lambda(lambda x: x + end_2)(input)
end = Lambda(lambda xx: Add()([xx, end_2]))(input)
return end
def FF(intH, intL):
L = Conv2D(256, (1, 1), padding='same', use_bias=False, name='L1')(intL)
L = BatchNormalization(name='L1_BN', epsilon=1e-5)(L)
L = Activation('relu')(L)
L = Dropout(0.1)(L)
L = Lambda(lambda xx: tf.image.resize_images(xx, tf.keras.backend.int_shape(intL)[1:3], align_corners=True))(L)
H = Conv2D(256, (1, 1), padding='same', use_bias=False, name='H1')(intH)
H = BatchNormalization(name='H1_BN', epsilon=1e-5)(H)
H = Activation('relu')(H)
H = Dropout(0.1)(H)
HL = Lambda(lambda xx: Multiply()([xx, L]))(H)
HL = Conv2D(256, (3, 3), padding='same', use_bias=False, name='HL1')(HL)
HL = BatchNormalization(name='HL1_BN', epsilon=1e-5)(HL)
HL = Activation('relu')(HL)
HL = Dropout(0.1)(HL)
IM = Concatenate()([intL, HL])
IM = Conv2D(256, (1, 1), padding='same', use_bias=False, name='IM1')(IM)
# end = Lambda(lambda x: x + intH)(IM)
end = Lambda(lambda xx: Add()([xx, intH]))(IM)
return end
def CFR(input):
x = GlobalAveragePooling2D()(input)
x = Lambda(lambda x: K.expand_dims(x, 1))(x)
x = Lambda(lambda x: K.expand_dims(x, 1))(x)
x = Conv2D(256, (1, 1), padding='same', use_bias=False, name='x1')(x)
x = Activation('sigmoid')(x)
# x = Lambda(lambda xx: tf.keras.activations.sigmoid(xx))(x)
end1 = Lambda(lambda xx: Multiply()([xx, input]))(x)
y1 = Conv2D(256, (3, 3), padding='same', use_bias=False, name='y1')(end1)
y1 = BatchNormalization(name='y1_BN', epsilon=1e-5)(y1)
y1 = Activation('relu')(y1)
y2 = DepthwiseConv2D((3, 3), strides=(1, 1), dilation_rate=(1, 1),
padding='same', use_bias=False, name='DWy2')(end1)
y2 = BatchNormalization(name='DWy2_BN', epsilon=1e-5)(y2)
y2 = Activation('relu')(y2)
# y = Lambda(lambda x: x * y2)(y1)
y = Lambda(lambda xx: Multiply()([xx, y2]))(y1)
y = BatchNormalization(name='y_BN', epsilon=1e-5)(y)
y = Activation('relu')(y)
end2 = Lambda(lambda xx: Add()([xx, end1]))(y)
end = Lambda(lambda xx: Add()([xx, input]))(end2)
return end
def HRC(h, l):
return CFR(FF(CFE(h), l))
a = MCRNetmbil([input_shape[0], input_shape[1], 3], downsample_factor=downsample_factor)
print(a)
具体报错信息为:
Traceback (most recent call last):
File "F:\project\pspdeep2\MCRNet.py", line 273, in <module>
a = MCRNetmbil([input_shape[0], input_shape[1], 3], downsample_factor=downsample_factor)
File "F:\project\pspdeep2\MCRNet.py", line 174, in MCRNetmbil
model = Model(img_input, x, name='mcrnet')
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 94, in __init__
self._init_graph_network(*args, **kwargs)
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 241, in _init_graph_network
self.inputs, self.outputs)
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 1434, in _map_graph_network
tensor_index=tensor_index)
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 1421, in build_map
node_index, tensor_index)
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 1421, in build_map
node_index, tensor_index)
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 1421, in build_map
node_index, tensor_index)
[Previous line repeated 17 more times]
File "G:\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\network.py", line 1393, in build_map
node = layer._inbound_nodes[node_index]
AttributeError: 'NoneType' object has no attribute '_inbound_nodes'