m0_52062055 2022-08-31 16:21 · acceptance rate: 30%
499 views
Closed

Getting AttributeError: 'NoneType' object has no attribute 'compile' when calling model.compile in Python

While running the GoogLeNet program from Chapter 11 of the second book of "Deep Learning for Computer Vision with Python", the call to model.compile fails with 'NoneType' object has no attribute 'compile'. The script is as follows:
import matplotlib

matplotlib.use("Agg")

from sklearn.preprocessing import LabelBinarizer
from pyimagesearch.nn.conv import MiniGoogLeNet
from pyimagesearch.callbacks import TrainingMonitor
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
from keras.optimizers import SGD
from keras.datasets import cifar10
import numpy as np
import argparse
import os

NUM_EPOCHS = 70
INIT_LR = 5e-3


def poly_decay(epoch):
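    # polynomial learning-rate decay: alpha = baseLR * (1 - epoch / maxEpochs) ** power
    # with power = 1.0 this decays linearly from INIT_LR to 0 over NUM_EPOCHS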
    maxEpochs = NUM_EPOCHS
    baseLR = INIT_LR
    power = 1.0

    alpha = baseLR * (1 - (epoch / float(maxEpochs))) ** power

    return alpha


ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
                help="path to output model")
ap.add_argument("-o", "--output", required=True,
                help="path to output directory (logs, plots, etc.)")
args = vars(ap.parse_args())

print("[INFO] loading CIFAR-10 data...")
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float")
testX = testX.astype("float")

mean = np.mean(trainX, axis=0)
trainX -= mean
testX -= mean

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

aug = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,
                         horizontal_flip=True,
                         fill_mode="nearest")

figPath = os.path.sep.join([args["output"], "{}.png".format(os.getpid())])
jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])
callbacks = [TrainingMonitor(figPath, jsonPath=jsonPath),
             LearningRateScheduler(poly_decay)]

print("[INFO] compiling model...")
opt = SGD(lr=INIT_LR, momentum=0.9)
model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

print("[INFO] training network...")
model.fit_generator(aug.flow(trainX, trainY, batch_size=64),
                    validation_data=(testX, testY), steps_per_epoch=len(trainX) // 64,
                    epochs=NUM_EPOCHS, callbacks=callbacks, verbose=1)
print("[INFO] serializing network...")
model.save(args["model"])

When I run python googlenet_cifar10.py from the terminal, I get the 'NoneType' object has no attribute 'compile' error.

5 answers

  • 这次真没糖 2022-08-31 22:12

    There is a problem in your MiniGoogLeNet code; use the version below as a reference.
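    In other words, the AttributeError almost certainly means that MiniGoogLeNet.build() returned None instead of a Keras Model; the most common cause is a missing (or mis-indented) return model statement in your local copy of the class. A minimal sketch of that failure mode, using a hypothetical class name purely for illustration:

    # Hypothetical sketch: a build() that never returns the Model
    class BrokenMiniGoogLeNet:
        @staticmethod
        def build(width, height, depth, classes):
            # ... layers would be wired up here, but the method falls
            # through without `return model`, so Python returns None
            pass

    model = BrokenMiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
    print(model)  # None
    # model.compile(...)  # -> AttributeError: 'NoneType' object has no attribute 'compile'

    The corrected MiniGoogLeNet below returns the constructed Model explicitly: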

    
    # import the necessary packages
    from tensorflow.keras.layers import BatchNormalization
    from tensorflow.keras.layers import Conv2D
    from tensorflow.keras.layers import AveragePooling2D
    from tensorflow.keras.layers import MaxPooling2D
    from tensorflow.keras.layers import Activation
    from tensorflow.keras.layers import Dropout
    from tensorflow.keras.layers import Dense
    from tensorflow.keras.layers import Flatten
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import concatenate
    from tensorflow.keras import backend as K
     
    class MiniGoogLeNet:
        @staticmethod
        def conv_module(x, K, kX, kY, stride, chanDim, padding="same"):
            # define a CONV => BN => RELU pattern
            x = Conv2D(K, (kX, kY), strides=stride, padding=padding)(x)
            x = BatchNormalization(axis=chanDim)(x)
            x = Activation("relu")(x)
            # return the block
            return x
     
        @staticmethod
        def inception_module(x, numK1x1, numK3x3, chanDim):
            # define two CONV modules, then concatenate across the
            # channel dimension
            conv_1x1 = MiniGoogLeNet.conv_module(x, numK1x1, 1, 1, (1, 1), chanDim)
            conv_3x3 = MiniGoogLeNet.conv_module(x, numK3x3, 3, 3, (1, 1), chanDim)
            x = concatenate([conv_1x1, conv_3x3], axis=chanDim)
            # return the block
            return x
     
        @staticmethod
        def downsample_module(x, K, chanDim):
            # define the CONV module and POOL, then concatenate
            # across the channel dimensions
            conv_3x3 = MiniGoogLeNet.conv_module(x, K, 3, 3, (2, 2), chanDim, padding="valid")
            pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
            x = concatenate([conv_3x3, pool], axis=chanDim)
            # return the block
            return x
     
        @staticmethod
        def build(width, height, depth, classes):
            # initialize the input shape to be "channels last" and the
            # channels dimension itself
            inputShape = (height, width, depth)
            chanDim = -1
     
            # if we are using "channels first", update the input shape
            # and channels dimension
            if K.image_data_format() == "channels_first":
                inputShape = (depth, height, width)
                chanDim = 1
            # define the model input and first CONV module
            inputs = Input(shape=inputShape)
            x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chanDim)
            # two Inception modules followed by a downsample module
            x = MiniGoogLeNet.inception_module(x, 32, 32, chanDim)
            x = MiniGoogLeNet.inception_module(x, 32, 48, chanDim)
            x = MiniGoogLeNet.downsample_module(x, 80, chanDim)
     
            # four Inception modules followed by a downsample module
            x = MiniGoogLeNet.inception_module(x, 112, 48, chanDim)
            x = MiniGoogLeNet.inception_module(x, 96, 64, chanDim)
            x = MiniGoogLeNet.inception_module(x, 80, 80, chanDim)
            x = MiniGoogLeNet.inception_module(x, 48, 96, chanDim)
            x = MiniGoogLeNet.downsample_module(x, 96, chanDim)
     
            # two Inception modules followed by global POOL and dropout
            x = MiniGoogLeNet.inception_module(x, 176, 160, chanDim)
            x = MiniGoogLeNet.inception_module(x, 176, 160, chanDim)
            x = AveragePooling2D((7, 7))(x)
            x = Dropout(0.5)(x)
     
            # softmax classifier
            x = Flatten()(x)
            x = Dense(classes)(x)
            x = Activation("softmax")(x)
     
            # create the model
            model = Model(inputs, x, name="googlenet")
     
            # return the constructed network architecture
            return model
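    As a quick sanity check (assuming the corrected class is importable from your pyimagesearch.nn.conv package, as in the book's project layout), you can verify that build() now returns an actual Model before compiling:

    # Verify that build() returns a Model rather than None
    from tensorflow.keras.optimizers import SGD

    model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
    print(type(model))   # a Keras Model / Functional object, not NoneType
    model.compile(loss="categorical_crossentropy",
                  optimizer=SGD(learning_rate=5e-3, momentum=0.9),
                  metrics=["accuracy"])
    model.summary()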
    
    This answer was accepted by the asker as the best answer.


Question timeline

  • Closed by the system on Sep 9
  • Answer accepted on Sep 1
  • ¥10 bounty added on Aug 31
  • Question created on Aug 31
