顾小巍 2021-07-01 11:18

TensorFlow: type/format problem when running the code

TypeError: Failed to convert object of type <class 'tensorflow.python.keras.engine.sequential.Sequential'> to Tensor. Contents: <tensorflow.python.keras.engine.sequential.Sequential object at 0x0000011BE6CC07B8>. Consider casting elements to a supported type.



```python
import warnings
warnings.filterwarnings('ignore')

import h5py
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dropout, Flatten, Dense,BatchNormalization,LeakyReLU,Reshape,Conv2DTranspose
from tensorflow.keras import backend as K
from keras.optimizers import SGD
from keras.models import load_model
import matplotlib.pyplot as plt

from  tensorflow.compat.v1 import ConfigProto
from  tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

K.image_data_format() == 'channels_first'
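# Note: the line above only evaluates a comparison and discards the result;
# the data format stays at the Keras default (normally 'channels_last'),
# which matches the (19, 19, 15) inputs used below.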
from keras.utils import np_utils


def test_model_generator(input_shape=None):
    model = Sequential()
    model.add(Dense(19*19*32, use_bias=False, input_shape=(100,)))
    model.add(BatchNormalization())
    model.add(LeakyReLU())

    model.add(Reshape((19, 19, 32)))  # 19*19*32

    model.add(Conv2DTranspose(32, (1, 1), strides=(1, 1), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU())  # 19*19*32

    model.add(Conv2DTranspose(16, (1, 1), strides=(1, 1), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU())  # 19*19*16

    model.add(Conv2DTranspose(15, (1, 1), strides=(1, 1), padding='same', use_bias=False))
    model.add(BatchNormalization())
    model.add(LeakyReLU())  # 19*19*15

    model.add(Conv2DTranspose(15, (1, 1), strides=(1, 1), padding='same', use_bias=False, activation='tanh'))
    # output: 19*19*15

    return model

def test_model_discriminator(C1=None):
    model = Sequential()
    model.add(Conv2D(32, (5, 5), strides=(2, 2), padding='same',
                            input_shape=[19, 19, 15]))
    model.add(LeakyReLU())
    model.add(Dropout(0.3))  # 10*10*32

    model.add(Conv2D(64, (5, 5), strides=(2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(0.3))  # 5*5*64

    model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    # model.add(Dropout(0.3))      # 3*3*128

    model.add(Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())  # 2*2*256

    model.add(Flatten())

    model.add(Dense(512))
    model.add(BatchNormalization())
    model.add(LeakyReLU())

    model.add(Dense(1))

    return model

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def dis_loss(real_output,fake_output):

    return tf.reduce_mean(fake_output)-tf.reduce_mean(real_output)

def gen_loss(fake_output):

    return cross_entropy(tf.ones_like(fake_output),fake_output)

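# Note: dis_loss above is a Wasserstein-style critic loss (difference of the
# output means), while gen_loss applies binary cross-entropy to the raw logits.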
@tf.function
def train_step(images):
    noise=tf.random.normal([BATCH_SIZE,noise_dim])

    with tf.GradientTape() as gen_tape,tf.GradientTape() as dis_tape:
        generated_images=test_model_generator(noise)

        real_output=test_model_discriminator(images)
        fake_output=test_model_discriminator(generated_images)
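        # NOTE: the three calls above invoke the model-*builder* functions, so
        # `generated_images`, `real_output` and `fake_output` are Sequential
        # model objects rather than tensors (noise/images are swallowed as the
        # unused input_shape/C1 arguments). tf.reduce_mean in dis_loss then
        # raises the "Failed to convert ... Sequential ... to Tensor" TypeError
        # quoted above. The built `generator`/`discriminator` instances are
        # presumably what should be called here.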
        print(generated_images)
        print(real_output)
        print(fake_output)

        discriminator_loss=dis_loss(real_output,fake_output)
        generator_loss=gen_loss(fake_output)

    dis_grads=dis_tape.gradient(discriminator_loss,discriminator.trainable_variables)
    gen_grads=gen_tape.gradient(generator_loss,generator.trainable_variables)

    discriminator_optimizer.apply_gradients(zip(dis_grads,discriminator.trainable_variables))
    generator_optimizer.apply_gradients(zip(gen_grads,generator.trainable_variables))

def generate_and_save_images(model, epoch, test_input):
    # Notice `training` is set to False,
    # so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)

    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i+1)
        plt.imshow((predictions[i, :, :, 0] + 1)/2, cmap='gray')
        plt.axis('off')

    plt.savefig('./image/image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()

def train(dataset,epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)
            print('.', end='')
        print()


        generate_and_save_images(test_model_generator,
                                     epoch + 1,
                                     seed)

    generate_and_save_images(test_model_generator,
                             epochs,
                             seed)
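    # NOTE: both calls above pass the builder function `test_model_generator`
    # instead of a built model, so `model(test_input, training=False)` inside
    # generate_and_save_images would again return a Sequential object; the
    # `generator` instance built in __main__ is presumably what is meant.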

if __name__ == '__main__':
    windowSize = 19
    numPCAcomponents = 15
    testRatio = 0.30

    BATCH_SIZE = 64
    EPOCHS = 1000
    noise_dim = 100
    num_examples_to_generate = 16

    X_train = np.load("G:/code/HSI-SVM-master/data/XtrainWindowSize"
                      + str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
    y_train = np.load("G:/code/HSI-SVM-master/data/ytrainWindowSize"
                      + str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
    X_test = np.load("G:/code/HSI-SVM-master/data/XtestWindowSize"
                     + str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
    y_test = np.load("G:/code/HSI-SVM-master/data/ytestWindowSize"
                     + str(windowSize) + "PCA" + str(numPCAcomponents) + "testRatio" + str(testRatio) + ".npy")
    # reshape to (num, height, width, channels)
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], X_train.shape[3]))
    X_train=tf.cast(X_train,dtype=tf.float32)
    X_train=(X_train/127.5)-1
    # print(X_train)
    # print(X_train.shape)
    # print(X_train.dtype)
    train_datasets=tf.data.Dataset.from_tensor_slices(X_train).shuffle(1000).batch(BATCH_SIZE)
    X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], X_test.shape[3]))
    X_test=tf.cast(X_test,dtype=tf.float32)

    # convert the labels to one-hot
    y_train = np_utils.to_categorical(y_train)
    print(y_train.shape)
    y_test = np_utils.to_categorical(y_test)



    seed = tf.random.normal([num_examples_to_generate, noise_dim])

    generator_optimizer = tf.keras.optimizers.Adam(1e-4)
    discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

    generator = test_model_generator()
    discriminator = test_model_discriminator()


    # define the input shape
    input_shape = X_train[0].shape
    print(input_shape)

    C1 = 3 * numPCAcomponents

    train(train_datasets, EPOCHS)
```

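The error message points at where a `Sequential` model object, rather than a tensor, reaches `tf.reduce_mean`: inside `train_step`, the model-builder functions `test_model_generator` / `test_model_discriminator` are called on `noise` and `images`, so their return values are freshly built `Sequential` models instead of generated images and logits. Below is a minimal sketch of the likely fix, reusing the names from the code above and assuming the `generator` and `discriminator` instances built in `__main__` are the ones meant to be trained:

```python
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
        # Call the built model instances on tensors instead of the builder
        # functions, so the results are tensors that tf.reduce_mean and the
        # cross-entropy loss can consume.
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        discriminator_loss = dis_loss(real_output, fake_output)
        generator_loss = gen_loss(fake_output)

    dis_grads = dis_tape.gradient(discriminator_loss, discriminator.trainable_variables)
    gen_grads = gen_tape.gradient(generator_loss, generator.trainable_variables)

    discriminator_optimizer.apply_gradients(zip(dis_grads, discriminator.trainable_variables))
    generator_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
```

The same substitution applies in `train`: pass the built `generator` (not the `test_model_generator` function) to `generate_and_save_images`, otherwise `model(test_input, training=False)` again returns a `Sequential` object.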
