迪菲赫尔曼
2021-11-15 18:25
Acceptance rate: 100%
Views: 55

Training a convolutional neural network with Keras keeps getting interrupted by an unknown error

[screenshot]

Source code below:

```python
import os,shutil

original_dataset_dir = r'C:\Users\15790\Deep_Learning _with_Python\conv\kaggle_original_data'

base_dir = r'C:\Users\15790\Deep_Learning _with_Python\conv\cats_and_dogs_small'
os.mkdir(base_dir)

train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir,'test')
os.mkdir(test_dir)
train_cats_dir = os.path.join(train_dir, 'cats')  # directory for cat training images
os.mkdir(train_cats_dir)

train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory for dog training images
os.mkdir(train_dogs_dir)

validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory for cat validation images
os.mkdir(validation_cats_dir)

validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory for dog validation images
os.mkdir(validation_dogs_dir)

test_cats_dir = os.path.join(test_dir, 'cats')  # directory for cat test images
os.mkdir(test_cats_dir)

test_dogs_dir = os.path.join(test_dir, 'dogs')  # directory for dog test images
os.mkdir(test_dogs_dir)

from keras import layers
from keras import models

model = models.Sequential()
model.add(layers.Conv2D(32,(3,3),activation='relu',input_shape=(150,150,3)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64,(3,3),activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128,(3,3),activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(128,(3,3),activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))

from tensorflow.keras import optimizers

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-4),
              metrics=['acc'])

from keras.preprocessing.image import ImageDataGenerator

# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
        # This is the target directory
        train_dir,
        # All images will be resized to 150x150
        target_size=(150, 150),
        batch_size=20,
        # Since we use binary_crossentropy loss, we need binary labels
        class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
        validation_dir,
        target_size=(150, 150),
        batch_size=20,
        class_mode='binary')


# Inspect the shape of one batch yielded by the generator
for data_batch, labels_batch in train_generator:
    print('data batch shape:', data_batch.shape)
    print('labels batch shape:', labels_batch.shape)
    break

history = model.fit_generator(
      train_generator,
      steps_per_epoch=100,
      epochs=30,
      validation_data=validation_generator,
      validation_steps=50)
```
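For reference, here is a minimal sketch of the same compile/fit steps written entirely against the tensorflow.keras namespace (assuming TensorFlow 2.x), using model.fit in place of the deprecated fit_generator and learning_rate instead of lr. The snippet above mixes the standalone keras package with tensorflow.keras, which can sometimes cause hard-to-diagnose failures, so this variant is only one thing worth trying, not a confirmed fix for the interruption:

```python
# Sketch only: same architecture and training call, with every import taken
# from tensorflow.keras (assumes TensorFlow 2.x); train_generator and
# validation_generator are reused from the data-pipeline code above.
from tensorflow.keras import layers, models, optimizers

model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(128, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-4),
              metrics=['acc'])

# In TF 2.x, model.fit accepts generators directly, replacing fit_generator.
history = model.fit(train_generator,
                    steps_per_epoch=100,
                    epochs=30,
                    validation_data=validation_generator,
                    validation_steps=50)
```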

