# NOTE(review): reported issue — with validation_freq=1, model.fit fails with
# "cannot convert NumPy array to tensor". X is a NumPy array (fine for Keras);
# the real culprit is y_*: pandas Series whose elements are *lists* of strings.
import tensorflow as tf
import numpy as np
import wfdb
import pandas as pd
import ast
from matplotlib import pyplot as plt
import torch
from sklearn.preprocessing import LabelEncoder
def load_raw_data(df, sampling_rate, path):
    """Read the raw ECG waveforms referenced by *df* using wfdb.

    Parameters
    ----------
    df : pandas.DataFrame
        PTB-XL metadata table providing ``filename_lr`` (100 Hz) and
        ``filename_hr`` (500 Hz) columns of record paths.
    sampling_rate : int
        100 selects the low-rate files; any other value the high-rate ones.
    path : str
        Directory prefix prepended to every record filename.

    Returns
    -------
    numpy.ndarray
        Stacked signal arrays, one entry per record.
    """
    # Pick the filename column that matches the requested sampling rate.
    filenames = df.filename_lr if sampling_rate == 100 else df.filename_hr
    records = [wfdb.rdsamp(path + name) for name in filenames]
    # rdsamp returns (signal, metadata); keep only the signal arrays.
    return np.array([signal for signal, _meta in records])
# PTB-XL location and the sampling rate to load (100 Hz "low rate" files).
path = ''
sampling_rate = 100

# Load the annotation table; scp_codes is stored as a stringified dict,
# so parse each row back into a real dict.
Y = pd.read_csv('ptbxl_database.csv', index_col='ecg_id')
Y.scp_codes = Y.scp_codes.apply(ast.literal_eval)

# Load the raw signal data for every record in the table.
X = load_raw_data(Y, sampling_rate, path)

# Keep only the SCP statements flagged as diagnostic, for aggregation below.
agg_df = pd.read_csv('scp_statements.csv', index_col=0)
agg_df = agg_df[agg_df.diagnostic == 1]
def aggregate_diagnostic(y_dic, statements=None):
    """Map a record's SCP codes to its diagnostic superclasses.

    Parameters
    ----------
    y_dic : dict
        Parsed ``scp_codes`` mapping of SCP code -> likelihood.
    statements : pandas.DataFrame, optional
        Diagnostic statement table indexed by SCP code with a
        ``diagnostic_class`` column. Defaults to the module-level
        ``agg_df``, so existing callers are unaffected.

    Returns
    -------
    list
        Unique diagnostic superclass names (order unspecified).
    """
    if statements is None:
        statements = agg_df
    # Set comprehension deduplicates superclasses shared by several codes.
    return list({statements.loc[key].diagnostic_class
                 for key in y_dic
                 if key in statements.index})
# Apply the diagnostic superclass aggregation to every record.
Y['diagnostic_superclass'] = Y.scp_codes.apply(aggregate_diagnostic)

# Split into train and test by the recommended stratification fold.
test_fold = 10
# Train
X_train = X[np.where(Y.strat_fold != test_fold)]
y_train = Y[(Y.strat_fold != test_fold)].diagnostic_superclass
# Test
X_test = X[np.where(Y.strat_fold == test_fold)]
y_test = Y[Y.strat_fold == test_fold].diagnostic_superclass

# BUG FIX: y_train/y_test were pandas Series whose elements are *lists* of
# superclass strings — Keras cannot convert those to a tensor ("Failed to
# convert a NumPy array to a Tensor"). Additionally, the model below is
# tf.keras, so wrapping X in torch.from_numpy (as the original did) is
# wrong: Keras consumes NumPy arrays directly.
# SparseCategoricalCrossentropy expects one integer class id per sample,
# so keep the first superclass of each labelled record, drop unlabelled
# records, and encode the class strings as integers.
label_encoder = LabelEncoder()
label_encoder.fit([c[0] for c in Y.diagnostic_superclass if len(c) > 0])

def _encode_split(X_part, y_part):
    # Drop records with no diagnostic superclass, then map the first
    # superclass string of each remaining record to an integer id.
    keep = y_part.apply(len) > 0
    labels = label_encoder.transform([c[0] for c in y_part[keep]])
    return X_part[keep.to_numpy()], labels

X_train, y_train = _encode_split(X_train, y_train)
X_test, y_test = _encode_split(X_test, y_test)

print(type(y_test))   # numpy.ndarray of integer labels
print(type(X_test))   # numpy.ndarray of raw signals
# NOTE: X_* and y_* stay NumPy arrays — do NOT convert them to torch
# tensors; that was the source of the original conversion error.
# Build the tf.keras 1-D CNN classifier.
model = tf.keras.models.Sequential([
    # 1-D convolution over the time axis of the ECG signal.
    tf.keras.layers.Conv1D(filters=96,
                           kernel_size=3,
                           padding="same"),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.Activation("relu"),
    tf.keras.layers.AveragePooling1D(pool_size=4,
                                     padding="valid"),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation="sigmoid",
                          kernel_regularizer=tf.keras.regularizers.l1()),
    # BUG FIX: the output layer used "sigmoid", but the loss is
    # SparseCategoricalCrossentropy(from_logits=False), which requires a
    # probability distribution over the 5 classes — use "softmax".
    tf.keras.layers.Dense(5, activation="softmax",
                          kernel_regularizer=tf.keras.regularizers.l2())
])
# Compile with integer-label cross-entropy (labels are class ids 0..4).
model.compile(optimizer="nadam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=["sparse_categorical_accuracy"])

# BUG FIX: the original called model.fit twice back to back, training for
# 2 x 10000 epochs and discarding the first run's History. Train once and
# keep the History object for the plots below.
history = model.fit(X_train, y_train,
                    batch_size=64,
                    epochs=10000,
                    validation_data=(X_test, y_test),
                    validation_freq=1)
model.summary()
# Visualise the training curves.
# BUG FIX: 'sparse_categorical_accuaracy' was misspelled (KeyError at
# runtime), and the loss plot reused subplot (1,2,1), overdrawing the
# accuracy plot — it belongs in (1,2,2).
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()