编写的VAE代码,跑2017数据集准确率低,只有0.53,原数据不加标签维度为46,训练集benign.csv 5600行,46列
测试集ceshi 2400行,47列
将数据用PCA主成分分析法降维到5维、16维、32维,准确率变化不大。请问如何提升准确率到0.85或0.9以上?
源代码如下:
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Lambda, Conv1D, Flatten, SpatialDropout1D, Reshape
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras import backend as K
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import pandas as pd
import time
from tensorflow.keras import backend as K
# 读取数据
csv_path_train = 'E:/dataset/CIC-IDS-2017/benign.csv'
X_train = pd.read_csv(csv_path_train).values
# 假设 X_train 是你的输入数据
# 处理无穷大值
X_train = np.where(np.isinf(X_train), np.nan, X_train) # 将无穷大替换为 NaN
X_train = np.nan_to_num(X_train) # 将 NaN 替换为 0 或其他适当的值
X_train = np.nan_to_num(MinMaxScaler().fit_transform(StandardScaler().fit_transform(X_train)))
X_train = np.reshape(X_train, (-1, 100, 46))
csv_path_test = 'E:/dataset/CIC-IDS-2017/ceshi.csv'
Y_test = pd.read_csv(csv_path_test)
Y_test_normal = Y_test[Y_test.label == 'Benign'].drop(labels='label', axis=1).values
Y_test_normal = np.where(np.isinf(X_train), np.nan, X_train) # 将无穷大替换为 NaN
Y_test_normal = np.nan_to_num(X_train) # 将 NaN 替换为 0 或其他适当的值
print(f"Y_test_normal:{Y_test_normal.shape}")
# 假设 Y_test_normal 是三维的,形状为 (n_samples, n_time_steps, n_features)
# 我们需要将数据重塑为 (n_samples * n_time_steps, n_features)
n_samples, n_time_steps, n_features = Y_test_normal.shape
Y_test_normal_reshaped = Y_test_normal.reshape(n_samples * n_time_steps, n_features)
# 应用 StandardScaler
scaler = StandardScaler()
Y_test_normal_scaled = scaler.fit_transform(Y_test_normal_reshaped)
# 如果需要,再应用 MinMaxScaler(通常不需要两者同时使用)
min_max_scaler = MinMaxScaler()
Y_test_normal = min_max_scaler.fit_transform(Y_test_normal_scaled)
Y_test_normal = np.nan_to_num(MinMaxScaler().fit_transform(StandardScaler().fit_transform(Y_test_normal)))
Y_test_normal = np.reshape(Y_test_normal, (-1, 100, 46))
Y_test_abnormal = Y_test[Y_test.label != 'Benign'].drop(labels='label', axis=1).values
Y_test_abnormal = np.where(np.isinf(X_train), np.nan, X_train) # 将无穷大替换为 NaN
Y_test_abnormal = np.nan_to_num(X_train) # 将 NaN 替换为 0 或其他适当的值
n_samples, n_time_steps, n_features = Y_test_abnormal.shape
Y_test_abnormal_reshaped = Y_test_abnormal.reshape(n_samples * n_time_steps, n_features)
# 应用 StandardScaler
scaler = StandardScaler()
Y_test_abnormal_scaled = scaler.fit_transform(Y_test_abnormal_reshaped)
# 如果需要,再应用 MinMaxScaler(通常不需要两者同时使用)
min_max_scaler = MinMaxScaler()
Y_test_abnormal = min_max_scaler.fit_transform(Y_test_normal_scaled)
Y_test_abnormal = np.nan_to_num(MinMaxScaler().fit_transform(StandardScaler().fit_transform(Y_test_abnormal)))
Y_test_abnormal = np.reshape(Y_test_abnormal, (-1, 100, 46))
original_dim = 46 # 特征维度
latent_dim = 2 # 潜在空间维度
intermediate_dim = 256
batch_size = 100
# 采样函数
def sampling(args):
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
# 编码器
inputs = Input(shape=(100, original_dim))
x = Conv1D(64, kernel_size=3, activation='relu', padding='same')(inputs)
x = Flatten()(x)
z_mean = Dense(latent_dim)(x)
z_log_var = Dense(latent_dim)(x)
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
# 解码器
latent_inputs = Input(shape=(latent_dim,))
x = Dense(100 * 64, activation='relu')(latent_inputs)
x = Reshape((100, 64))(x) # 将展平的层重新调整为三维
x = Conv1D(64, kernel_size=3, activation='relu', padding='same')(x)
outputs = Conv1D(original_dim, kernel_size=3, activation='sigmoid', padding='same')(x)
decoder = Model(latent_inputs, outputs, name='decoder')
# VAE模型
outputs = decoder(encoder(inputs)[2]) # 连接编码器和解码器
vae = Model(inputs, outputs, name='vae')
# 定义损失函数
reconstruction_loss = tf.keras.losses.binary_crossentropy(K.flatten(inputs), K.flatten(outputs))
reconstruction_loss *= 100 # 对应维度
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1) * -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
test_first_d = Y_test_abnormal.shape[0]
print(f"test_normal:{Y_test_normal.shape[0]}")
print(f"test_abnormal:{Y_test_abnormal.shape[0]}")
# 训练VAE
startTime = time.time() # 开始时间
vae.fit(X_train, epochs=200, batch_size=batch_size, validation_data=(Y_test_normal, None))
losssum1=0
losssum2=0
for i in range(1,batch_size):
loss11=vae.evaluate(Y_test_normal)
losssum1=+loss11
loss22 = vae.evaluate(Y_test_abnormal)
losssum2=+loss22
print(f"loss1:{loss11}")
print(f"loss2:{loss22}")
endTime = time.time() #结束时间
print(f"Took {round((endTime - startTime), 5)} seconds to calculate.")
vae.summary()
print("Testing data shape:", Y_test_normal.shape)
result = vae.evaluate(Y_test_normal)
print("Evaluate result:", result)
loss1 = vae.evaluate(Y_test_normal)
loss2 = vae.evaluate(Y_test_abnormal)
print(f"loss1:{loss1}")
print(f"loss2:{loss2}")
from sklearn.metrics import roc_curve, auc
# 预测部分
def evaluate_vae(X_test, model):
# 获取重构输出
reconstructions = model.predict(X_test)
# 计算重构误差
reconstruction_errors = np.mean(np.abs(X_test - reconstructions), axis=(1, 2))
return reconstruction_errors
# 计算阈值
def find_threshold(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
# 选择使FPR和TPR之差最大的阈值
optimal_idx = np.argmax(tpr - fpr)
optimal_threshold = thresholds[optimal_idx]
return optimal_threshold
# 获取重构误差
recon_errors_normal = evaluate_vae(Y_test_normal, vae)
recon_errors_abnormal = evaluate_vae(Y_test_abnormal, vae)
# 合并数据
recon_errors = np.concatenate([recon_errors_normal, recon_errors_abnormal])
labels = np.array([0]*len(recon_errors_normal) + [1]*len(recon_errors_abnormal))
# 找到最佳阈值
optimal_threshold = find_threshold(labels, recon_errors)
# 使用阈值进行二分类
predictions = (recon_errors > optimal_threshold).astype(int)
from sklearn.metrics import confusion_matrix # 导入混淆矩阵计算函数
# 计算混淆矩阵
cm = confusion_matrix(labels, predictions)
TP = cm[1, 1]
TN = cm[0, 0]
FP = cm[0, 1]
FN = cm[1, 0]
# 计算评价指标
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP / (TP + FP)
recall = TP / (TP + FN)
f1 = 2 * (precision * recall) / (precision + recall)
# 绘制ROC曲线并计算AUC
fpr, tpr, _ = roc_curve(labels, recon_errors)
roc_auc = auc(fpr, tpr)
print(f"Accuracy: {accuracy}")
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1 Score: {f1}")
print(f"AUC: {roc_auc}")
# 绘制ROC曲线
import matplotlib.pyplot as plt
plt.figure()
plt.plot(fpr, tpr, label=f'AUC = {roc_auc:.2f}')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic')
plt.legend(loc="lower right")
plt.show()
PCA降维代码如下:
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from sklearn.decomposition import PCA
假设你的数据集是一个CSV文件,使用pandas加载
data = pd.read_csv('E:/dataset/CIC-IDS-2017/benign.csv') # 替换为你的CSV文件路径
X = data # 在这个例子中,data已经是一个NumPy数组
假设 X 是你的输入数据
处理无穷大值
X= np.where(np.isinf(X), np.nan, X) # 将无穷大替换为 NaN
X= np.nan_to_num(X) # 将 NaN 替换为 0 或其他适当的值
X= np.nan_to_num(MinMaxScaler().fit_transform(StandardScaler().fit_transform(X)))
数据标准化
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
PCA降维到16个主成分
pca = PCA(n_components=32)
X_pca = pca.fit_transform(X_scaled)
查看原始数据和降维后数据的形状
print(f"原始数据的形状: {X.shape}")
print(f"降维后的数据形状: {X_pca.shape}")
如果需要将降维后的数据保存为CSV文件,可以使用pandas
df_pca = pd.DataFrame(X_pca, columns=[f'PC{i+1}' for i in range(32)])
df_pca.to_csv('E:/dataset/CIC-IDS-2017/reduced_benign.csv', index=False)
假设你的数据集是一个CSV文件,使用pandas加载
data = pd.read_csv('E:/dataset/CIC-IDS-2017/ceshi1.csv') # 替换为你的CSV文件路径
X = data # 在这个例子中,data已经是一个NumPy数组
假设 X 是你的输入数据
处理无穷大值
X= np.where(np.isinf(X), np.nan, X) # 将无穷大替换为 NaN
X= np.nan_to_num(X) # 将 NaN 替换为 0 或其他适当的值
X= np.nan_to_num(MinMaxScaler().fit_transform(StandardScaler().fit_transform(X)))
数据标准化
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
PCA降维到9个主成分
pca = PCA(n_components=32)
X_pca = pca.fit_transform(X_scaled)
查看原始数据和降维后数据的形状
print(f"原始数据的形状: {X.shape}")
print(f"降维后的数据形状: {X_pca.shape}")
如果需要将降维后的数据保存为CSV文件,可以使用pandas
df_pca = pd.DataFrame(X_pca, columns=[f'PC{i+1}' for i in range(32)])
df_pca.to_csv('E:/dataset/CIC-IDS-2017/reduced_ceshi1.csv', index=False)
运行结果:


