tsne可视化
代码如下:
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 11:55:08 2021
@author: 1
"""
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
from sklearn.manifold import TSNE
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from scikeras.wrappers import KerasClassifier
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.model_selection import cross_val_score, train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from keras.models import model_from_json
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import itertools
from keras.optimizers import SGD
from keras.layers import Dense, LSTM, Activation, Flatten, Convolution1D, Dropout, MaxPooling1D, BatchNormalization
from keras.models import load_model
from sklearn import preprocessing
# 载入数据
df= pd.read_csv(r'/root/autodl-tmp/376data3.csv')
X = np.expand_dims(df.values[:, 0:1024].astype(float), axis=2)
Y = df.values[:, 1024]
# 湿度分类编码为数字
# 划分训练集,测试集
X_train, X_test, K, y = train_test_split(X, Y, test_size=0.3, random_state=0)
K = K
encoder = LabelEncoder()
Y_encoded1 = encoder.fit_transform(K)
Y_train = to_categorical(Y_encoded1)
Y_encoded2 = encoder.fit_transform(y)
Y_test = to_categorical(Y_encoded2)
# 定义神经网络
def baseline_model():
model = Sequential()
model.add(Convolution1D(16, 64, strides=16, padding='same', input_shape=(1024, 1), activation='relu')) # 第一个卷积层
model.add(MaxPooling1D(2, strides=2, padding='same'))
model.add(
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
gamma_initializer='ones', moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None))
model.add(Convolution1D(32, 3, padding='same', activation='relu'))
model.add(MaxPooling1D(2, strides=2, padding='same'))
model.add(
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
gamma_initializer='ones', moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None))
model.add(Convolution1D(64, 3, padding='same', activation='relu')) # 第二个卷积层
model.add(MaxPooling1D(2, strides=2, padding='same'))
model.add(
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
gamma_initializer='ones', moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None))
model.add(Convolution1D(64, 3, padding='same', activation='relu')) # 第三个卷积层
model.add(MaxPooling1D(2, strides=2, padding='same'))
model.add(
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
gamma_initializer='ones', moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None))
model.add(Convolution1D(64, 3, padding='same', activation='relu')) # 第四个卷积层
model.add(MaxPooling1D(2, strides=2, padding='same'))
model.add(
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
gamma_initializer='ones', moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None))
model.add(Convolution1D(64, 3, padding='same', activation='relu')) # 第五个卷积层
model.add(MaxPooling1D(2, strides=2, padding='same'))
model.add(
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros',
gamma_initializer='ones', moving_mean_initializer='zeros',
moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None,
beta_constraint=None, gamma_constraint=None))
model.add(Dense(100, activation='relu'))
model.add(LSTM(64, return_sequences=True))
model.add(Dropout(0.5))
model.add(LSTM(32))
model.add(Flatten())
model.add(Dense(758, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
return model
# 训练分类器
estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=128, verbose=1)
history = estimator.fit(X_train, Y_train, validation_data=(X_test, Y_test))
import matplotlib.pyplot as plt
print(estimator.model)
# 卷积网络可视化
def visual(model, data, num_layer=1):
layer = keras.backend.function([model.layers[0].input], [model.layers[num_layer].output])
f1 = layer([data])[0]
np.set_printoptions(threshold=np.inf)
print(f1.shape)
print(f1)
f2 = f1.reshape(6034, 64)
print(f2)
num = f1.shape[-1]
print(num)
plt.figure(figsize=(6, 12), dpi=150)
for i in range(num):
plt.subplot(np.ceil(np.sqrt(num)), np.ceil(np.sqrt(num)), i + 1)
plt.imshow(f1[:, :, i] * 255, cmap='prism')
plt.axis('off')
plt.show()
def get_data():
# digits = datasets.load_digits(n_class=10)
digits = 2
data = f2 # digits.data # 图片特征
label = K # digits.target # 图片标签
n_samples = 6034
n_features = 64 # data.shape # 数据集的形状
return data, label, n_samples, n_features
# 对样本进行预处理并画图
def plot_embedding(data, label, title):
x_min, x_max = np.min(data, 0), np.max(data, 0)
data = (data - x_min) / (x_max - x_min) # 对数据进行归一化处理
fig = plt.figure() # 创建图形实例
ax = plt.subplot(111) # 创建子图
# 遍历所有样本
for i in range(data.shape[0]):
# 在图中为每个数据点画出标签
plt.text(data[i, 0], data[i, 1], str(label[i]), color=plt.cm.Set1(label[i] / 10),
fontdict={'weight': 'bold', 'size': 7})
plt.xticks() # 指定坐标的刻度
plt.yticks()
plt.title(title, fontsize=14)
# 返回值
return fig
data, label, n_samples, n_features = get_data() # 调用函数,获取数据集信息
print('Starting compute t-SNE Embedding...')
ts = TSNE(n_components=2, init='pca', random_state=0)
# t-SNE降维
reslut = ts.fit_transform(data)
# 调用函数,绘制图像
fig = plot_embedding(reslut, label, 't-SNE Embedding of digits')
# 显示图像
plt.show()
# 可视化卷积层
visual(estimator.model, X_train, 20) # 在这里插入代码片
报错如下:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_15584/378268790.py in <module>
111 # 训练分类器
112 estimator = KerasClassifier(build_fn=baseline_model, epochs=10, batch_size=128, verbose=1)
--> 113 history = estimator.fit(X_train, Y_train, validation_data=(X_test, Y_test))
114 import matplotlib.pyplot as plt
115
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in fit(self, X, y, sample_weight, **kwargs)
1489 sample_weight = 1 if sample_weight is None else sample_weight
1490 sample_weight *= compute_sample_weight(class_weight=self.class_weight, y=y)
-> 1491 super().fit(X=X, y=y, sample_weight=sample_weight, **kwargs)
1492 return self
1493
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in fit(self, X, y, sample_weight, **kwargs)
758 kwargs["initial_epoch"] = kwargs.get("initial_epoch", 0)
759
--> 760 self._fit(
761 X=X,
762 y=y,
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in _fit(self, X, y, sample_weight, warm_start, epochs, initial_epoch, **kwargs)
926 self._check_model_compatibility(y)
927
--> 928 self._fit_keras_model(
929 X,
930 y,
~/miniconda3/lib/python3.8/site-packages/scikeras/wrappers.py in _fit_keras_model(self, X, y, sample_weight, warm_start, epochs, initial_epoch, **kwargs)
522 hist = self.model_.fit(x=X, y=y, **fit_args)
523 else:
--> 524 hist = self.model_.fit(x=X, y=y, **fit_args)
525
526 if not warm_start or not hasattr(self, "history_") or initial_epoch == 0:
~/miniconda3/lib/python3.8/site-packages/keras/src/utils/traceback_utils.py in error_handler(*args, **kwargs)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
~/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py in tf__test_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
ValueError: in user code:
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1972, in test_function *
return step_function(self, iterator)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1956, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1944, in run_step **
outputs = model.test_step(data)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1852, in test_step
self.compute_loss(x, y, y_pred, sample_weight)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/training.py", line 1139, in compute_loss
return self.compiled_loss(
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/engine/compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/losses.py", line 142, in __call__
losses = call_fn(y_true, y_pred)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/losses.py", line 268, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/losses.py", line 2122, in categorical_crossentropy
return backend.categorical_crossentropy(
File "/root/miniconda3/lib/python3.8/site-packages/keras/src/backend.py", line 5560, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 434) and (None, 758) are incompatible