# Question (translated): This U-Net is meant to stand in for a physical process.
# An input image passes through the network, the network output is pushed through
# a physical formula (get_diffract), and the difference against the original image
# is used as the loss to update the network weights, i.e. the net learns to invert
# the physical formula. My machine cannot actually run the U-Net, so please check
# whether the code below is written correctly.
import tensorflow as tf
import cv2
from My_function import get_diffract
class Unet(tf.keras.Model):
    """U-Net whose single-channel sigmoid output is pushed through the
    physical model ``get_diffract`` before being returned.

    ``call`` therefore returns the *diffracted* field, so the training loss
    ``mse(img, model(img))`` compares the physically propagated output
    against the original image, as described in the header comment.

    NOTE(review): the encoder downsamples 4 times, so the input height and
    width must each be divisible by 16 for the skip-connection concats to
    line up — confirm against the actual image size.
    """

    def __init__(self):
        super(Unet, self).__init__()
        # ---- encoder (contracting path) ----
        self.layer1_1 = tf.keras.layers.Conv2D(filters=64, kernel_size=[3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer1_2 = tf.keras.layers.Conv2D(filters=64, kernel_size=[3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer2_1 = tf.keras.layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer2_2 = tf.keras.layers.Conv2D(filters=128, kernel_size=[3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer3_1 = tf.keras.layers.Conv2D(filters=256, kernel_size=[3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer3_2 = tf.keras.layers.Conv2D(filters=256, kernel_size=[3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer4_1 = tf.keras.layers.Conv2D(512, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer4_2 = tf.keras.layers.Conv2D(512, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        # ---- bottleneck ----
        self.layer5_1 = tf.keras.layers.Conv2D(1024, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer5_2 = tf.keras.layers.Conv2D(1024, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        # ---- decoder (expanding path) ----
        self.layer6_1 = tf.keras.layers.Conv2D(512, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer6_2 = tf.keras.layers.Conv2D(512, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer7_1 = tf.keras.layers.Conv2D(256, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer7_2 = tf.keras.layers.Conv2D(256, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer8_1 = tf.keras.layers.Conv2D(128, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer8_2 = tf.keras.layers.Conv2D(128, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer9_1 = tf.keras.layers.Conv2D(64, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        self.layer9_2 = tf.keras.layers.Conv2D(64, [3, 3], padding='same', activation='relu',
                                               kernel_initializer='he_normal')
        # BUG FIX: these layers were previously created *inside* call().
        # In a subclassed Model, a layer built in call() is a brand-new layer
        # with freshly initialized weights on every forward pass, so its
        # weights are never tracked consistently in trainable_variables and
        # never learn.  All layers must be created once, here in __init__.
        self.up6 = tf.keras.layers.Conv2D(1024, [2, 2], padding='same', kernel_initializer='he_normal')
        self.up7 = tf.keras.layers.Conv2D(512, [2, 2], padding='same', kernel_initializer='he_normal')
        self.up8 = tf.keras.layers.Conv2D(256, [2, 2], padding='same', kernel_initializer='he_normal')
        self.up9 = tf.keras.layers.Conv2D(128, [2, 2], padding='same', kernel_initializer='he_normal')
        self.conv_out1 = tf.keras.layers.Conv2D(64, [2, 2], padding='same', kernel_initializer='he_normal')
        # Sigmoid keeps the single-channel output in [0, 1], matching the
        # /255-normalized input image.
        self.conv_out2 = tf.keras.layers.Conv2D(1, [1, 1], padding='same', activation='sigmoid')
        # Pooling and upsampling are stateless, so one shared instance each
        # is safe to reuse at every scale.
        self.Maxpol = tf.keras.layers.MaxPooling2D(pool_size=[2, 2], strides=2)
        self.Upsample = tf.keras.layers.UpSampling2D(size=(2, 2))
        # NOTE: the original also defined layer_end (2-filter 1x1 conv), Act
        # and Bn, none of which were ever used in call(); they have been
        # removed as dead weight.

    def call(self, inputs):
        # Encoder: two convs per scale, then 2x2 max-pool.
        x_1 = self.layer1_1(inputs)
        x_1 = self.layer1_2(x_1)
        x_2 = self.Maxpol(x_1)
        x_2 = self.layer2_1(x_2)
        x_2 = self.layer2_2(x_2)
        x_3 = self.Maxpol(x_2)
        x_3 = self.layer3_1(x_3)
        x_3 = self.layer3_2(x_3)
        x_4 = self.Maxpol(x_3)
        x_4 = self.layer4_1(x_4)
        x_4 = self.layer4_2(x_4)
        x_5 = self.Maxpol(x_4)
        x_5 = self.layer5_1(x_5)
        x_5 = self.layer5_2(x_5)
        # Decoder: upsample, 2x2 conv, concatenate the skip connection on the
        # channel axis, then two 3x3 convs.
        x_6 = self.up6(self.Upsample(x_5))
        x_6 = tf.concat([x_6, x_4], axis=3)
        x_6 = self.layer6_1(x_6)
        x_6 = self.layer6_2(x_6)
        x_7 = self.up7(self.Upsample(x_6))
        x_7 = tf.concat([x_7, x_3], axis=3)
        x_7 = self.layer7_1(x_7)
        x_7 = self.layer7_2(x_7)
        x_8 = self.up8(self.Upsample(x_7))
        x_8 = tf.concat([x_8, x_2], axis=3)
        x_8 = self.layer8_1(x_8)
        x_8 = self.layer8_2(x_8)
        x_9 = self.up9(self.Upsample(x_8))
        x_9 = tf.concat([x_9, x_1], axis=3)
        x_9 = self.layer9_1(x_9)
        x_9 = self.layer9_2(x_9)
        x = self.conv_out1(x_9)
        x = self.conv_out2(x)
        # Physical forward model: the loss in the training script compares
        # this diffracted result against the original image.
        # NOTE(review): get_diffract must be differentiable (built from TF
        # ops) for gradients to flow back into the network — confirm.
        return get_diffract(x)
# Run tf.function-decorated code eagerly so the script is easy to debug.
# (tf.config.experimental_run_functions_eagerly is deprecated; the
# non-experimental name is the supported API.)
tf.config.run_functions_eagerly(True)

model = Unet()

# Load the image as single-channel grayscale.  cv2.imread returns None
# (instead of raising) when the file cannot be read, so fail fast here
# rather than with an opaque error later.
Img = cv2.imread('C:\\Users\\CYH\\Desktop\\1.bmp', cv2.IMREAD_GRAYSCALE)
if Img is None:
    raise FileNotFoundError('could not read image: C:\\Users\\CYH\\Desktop\\1.bmp')
Img = Img / 255  # normalize uint8 [0, 255] -> float [0, 1]

# Shape (H, W) -> (1, H, W, 1): batch and channel dims expected by Conv2D.
img = tf.constant(Img)
img = tf.expand_dims(img, 0)
img = tf.expand_dims(img, -1)
img = tf.cast(img, tf.float32)
# (translated; '//' is not a Python comment and was a SyntaxError)
# The main part I am unsure about is the training loop below — please
# check whether it is written correctly.
# Simple single-image optimization loop: the network output is diffracted
# inside model.call, so mse(img, y) measures how well the learned field,
# after physical propagation, reproduces the original image.
optimizer = tf.keras.optimizers.Adam(0.01)
mse = tf.keras.losses.MeanSquaredError()
for step in range(50):
    with tf.GradientTape() as tape:
        y = model(img)
        loss = mse(img, y)
    # Gradients are taken outside the tape context, which is correct —
    # the tape has finished recording by then.
    gradient = tape.gradient(target=loss, sources=model.trainable_variables)
    optimizer.apply_gradients(zip(gradient, model.trainable_variables))
    # BUG FIX: the original printed model.trainable_variables every
    # iteration, dumping every weight tensor 50 times.  Report the scalar
    # loss instead so training progress is actually visible.
    print(f'step {step}: loss = {float(loss):.6f}')