


为什么在 test 部分,precision、recall 和 f1 能提高这么多?

关注让阿豪来帮你解答。本回答参考 ChatGPT 3.5 编写提供,如果还有疑问可以评论或留言。根据给出的截图,可以看到在 “test” 部分,precision、recall 和 f1-score 的提高幅度较大。这可能是由于模型在训练集和测试集上表现不一致导致的。以下是可能导致此结果的一些原因:
import numpy as np
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
# 数据增强
data_gen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
height_shift_range=0.1, shear_range=0.1, zoom_range=0.1,
horizontal_flip=True, vertical_flip=False, fill_mode='nearest')
# 数据平衡
def balance_data(X, y):
n_samples = len(y)
n_classes = len(np.unique(y))
X_balanced = np.empty((0, *X.shape[1:]))
y_balanced = np.empty((0,), dtype=int)
max_samples = int(n_samples / n_classes)
for label in range(n_classes):
X_label = X[y == label]
y_label = y[y == label]
n_label = len(y_label)
if n_label < max_samples:
X_label_new, y_label_new = X_label, y_label
else:
X_label_new, y_label_new = [], []
while len(y_label_new) < max_samples:
_, X_batch, _, y_batch = train_test_split(X_label, y_label, test_size=0.5)
X_label_new.extend(X_batch)
y_label_new.extend(y_batch)
X_balanced = np.vstack((X_balanced, X_label_new))
y_balanced = np.concatenate((y_balanced, y_label_new))
return X_balanced, y_balanced