class SELayer(nn.Module):
    """Squeeze-and-Excitation channel-attention layer (Hu et al., 2018).

    Squeezes spatial information via global average pooling, then learns
    per-channel gates in (0, 1) with a bottleneck MLP and rescales the input.

    Args:
        channel: number of input (and output) channels C.
        reduction: bottleneck reduction ratio r; hidden width is C // r.
            Default 1 (no reduction) preserved from the original code.
    """

    def __init__(self, channel, reduction=1):
        # BUG FIX: was `def init(...)`, so the constructor body never ran
        # and instances had no submodules.
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Linear-based excitation branch (used by forward()).
        self.fc1 = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Sigmoid())
        # 1x1-conv variant of the same excitation (not used by forward()).
        # BUG FIX: second conv was Conv2d(channel, channel // reduction),
        # which mismatches the bottleneck output for reduction > 1 and
        # fails to restore `channel` output channels.
        self.fc2 = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Rescale x (N, C, H, W) by learned per-channel attention weights."""
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)          # squeeze: (N, C)
        y = self.fc1(y).view(b, c, 1, 1)         # excite:  (N, C, 1, 1)
        return x * y                             # broadcast channel gates
# --- Demo: apply the SE attention block to a conv feature map ---
# BUG FIX(review): the original built a Keras symbolic graph
# (Input/keras.layers.Conv2D/K.int_shape) and then called a PyTorch
# nn.Module on the Keras tensor, which cannot work; the layout was also
# channels-last (128, 128, 3) while torch expects NCHW. Rewritten
# end-to-end in PyTorch with the same variable roles.
input_tensor = torch.randn(1, 3, 128, 128)            # NCHW: batch, C, H, W
x = nn.Conv2d(3, 64, kernel_size=3, padding=1)(input_tensor)
in_channels = x.size(1)                               # channel dim in NCHW
# Introduce the attention mechanism
model1 = SELayer(in_channels)
y = model1(x)