前提:我已设定随机数种子,并强制只用 CPU 进行计算,因此目前在同一台机器上多次运行代码,结果完全一致。
# Reproducibility setup.  Every RNG that the training path touches must be
# seeded, and the environment variables must be set BEFORE the libraries
# that read them are imported.
seed_value = 0

import os

# NOTE(review): PYTHONHASHSEED only takes effect if it is set before the
# interpreter starts; setting it here affects child processes only.  For a
# fully hash-stable run, launch with `PYTHONHASHSEED=0 python script.py`.
os.environ['PYTHONHASHSEED'] = str(seed_value)
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"   # hide GPUs -> CPU-only execution
os.environ['TF_DETERMINISTIC_OPS'] = '1'    # request deterministic TF kernels

import random
random.seed(seed_value)

import numpy as np
np.random.seed(seed_value)

# The missing piece: TensorFlow/Keras keeps its own global RNG that neither
# random.seed nor np.random.seed touches.  Without tf.random.set_seed, any
# layer without an explicit seed, dropout, and the shuffling inside
# model.fit can differ between installs/runs.  Pinning the op thread pools
# to 1 also removes nondeterminism from parallel reductions on CPU.
try:
    import tensorflow as tf
    tf.random.set_seed(seed_value)
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)
except ImportError:
    pass  # tensorflow not available in this environment

# NOTE(review): even with every seed fixed, EXACT bit-for-bit agreement
# across different machines is not guaranteed -- different TF/NumPy/BLAS
# builds and different CPU instruction sets (AVX2 vs AVX-512, etc.) can
# produce slightly different floating-point results, which training then
# amplifies.  For cross-machine reproducibility, pin identical library
# versions (same wheels) in addition to the seeds above.
但不知为何,同样的数据、同样的代码,换一台机器后运行结果就会发生变化。我该怎么做才能解决这个问题?出问题的代码如下(同一台机器上多次运行结果一致,但不同机器上结果不同):
def CNNGETPREDICTVAL(train_xx, train_yy, test_xx, inner_fac_len, loop_lr, loop_dst_num):
    """Train a small CNN classifier and return softmax class probabilities.

    Parameters
    ----------
    train_xx : pandas.DataFrame
        Training features; contains a 'date_time' column (dropped) plus
        ``inner_fac_len`` numeric feature columns.
    train_yy : array-like
        Integer class labels in {0, 1, 2}.
    test_xx : pandas.DataFrame
        Test features; contains 'date_time' and 'key_0' columns (dropped)
        plus the same feature columns.
    inner_fac_len : int
        Number of feature columns (width of the conv input).
    loop_lr : float
        Adam learning rate.
    loop_dst_num : int
        Width of the penultimate dense layer.

    Returns
    -------
    numpy.ndarray of shape (len(test_xx), 3)
        Per-class probabilities for each test row.
    """
    # Drop bookkeeping columns so only numeric features remain.
    train_xx = train_xx.drop('date_time', axis=1)
    test_xx = test_xx.drop(['date_time', 'key_0'], axis=1)

    # Reshape to (samples, height=1, width=inner_fac_len, channels=1).
    x_train = train_xx.values.reshape(-1, 1, inner_fac_len, 1)
    x_test = test_xx.values.reshape(-1, 1, inner_fac_len, 1)
    # keras.utils.to_categorical is the supported path; the legacy
    # keras.utils.np_utils alias was removed in recent Keras releases.
    y_train = keras.utils.to_categorical(train_yy, num_classes=3)

    # Fixed-seed initializer makes weight init repeatable on ONE machine.
    # NOTE(review): it does not make results portable across machines --
    # also call tf.random.set_seed(...) once at program start, and be aware
    # that different TF/BLAS builds can still yield different floats.
    init_info = keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=2021)

    # NOTE(review): val1/val2/val3 (conv filter counts) are not defined in
    # this function -- presumably module-level globals; consider passing
    # them as parameters for clarity and testability.
    model = keras.models.Sequential()
    model.add(keras.layers.Conv2D(val1, (1, 3), activation='relu', padding='same',
                                  input_shape=(1, inner_fac_len, 1),
                                  kernel_initializer=init_info))
    model.add(keras.layers.MaxPooling2D(pool_size=(1, 3)))
    model.add(keras.layers.Conv2D(val2, (1, 3), activation='relu', padding='same',
                                  kernel_initializer=init_info))
    model.add(keras.layers.MaxPooling2D(pool_size=(1, 3)))
    model.add(keras.layers.Conv2D(val3, (1, 3), activation='relu', padding='same',
                                  kernel_initializer=init_info))
    model.add(keras.layers.MaxPooling2D(pool_size=(1, 3)))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(loop_dst_num, activation='relu',
                                 kernel_initializer=init_info))
    model.add(keras.layers.Dense(3, activation='softmax',
                                 kernel_initializer=init_info))

    # Cross-entropy is the appropriate loss for a softmax head with one-hot
    # 3-class targets; the original 'mse' trains but yields weak gradients
    # for classification.
    my_optimizer = tf.optimizers.Adam(learning_rate=loop_lr)
    model.compile(optimizer=my_optimizer, loss='categorical_crossentropy')

    model.fit(x_train, y_train, batch_size=512, epochs=10)
    return model.predict(x_test, batch_size=512, verbose=0)