import os
import numpy as np
import pandas as pd
import tensorflow as tf
from keras import backend as K
import matplotlib.pyplot as plt
from sklearn import preprocessing
from autokeras import StructuredDataRegressor
from sklearn.model_selection import train_test_split
# Directory intended for reports/outputs.
# NOTE(review): report_path is never used in the visible code — confirm it is
# consumed later (or by a sibling script) before relying on it.
report_path = r'C:\Users\ASUS\Gesamtdata\Gesamt_drei_Input\Kraft'
# Load the raw dataset; read from the current working directory, so the script
# must be launched from the folder containing Kraftdata.xlsx.
df=pd.read_excel('Kraftdata.xlsx')
# normalization
def normalization(dataframe):
    """Min-max scale every column of *dataframe* into the [0, 1] range.

    Fits a fresh ``MinMaxScaler`` on the given data and returns the
    transformed values as a NumPy array.
    """
    scaler = preprocessing.MinMaxScaler()
    return scaler.fit_transform(dataframe)
def my_metric(y_test1, predict):
    """Tensor-based accuracy: 100 * (1 - RMS of the relative error).

    Relative error is |y_test1 - predict| / y_test1, so the metric assumes
    the true targets are non-zero.
    """
    rel_err = K.abs(y_test1 - predict) / y_test1
    # Sample count as float32 so the division stays in floating point.
    count = tf.cast(len(y_test1), tf.float32)
    return (1 - K.sqrt(K.sum(K.square(rel_err)) / count)) * 100
def get_accuracy(test_data, predict_data):
    """NumPy accuracy: 100 * (1 - RMS of the relative prediction error).

    Relative error is |test_data - predict_data| / test_data, so the metric
    assumes every true value is non-zero. A perfect prediction scores 100.
    """
    relative = np.abs(test_data - predict_data) / test_data
    # np.mean(x**2) == np.sum(x * x) / len(x): root-mean-square of the error.
    return (1 - np.sqrt(np.mean(np.square(relative)))) * 100
# --- Data preparation: normalize features, split into train/test sets. ---
print(df.shape)
# Work on the underlying ndarray of the DataFrame.
data = df.values
# Min-max scale only the first three columns (the features) in place;
# column 3 (the target) is left on its original scale.
data[:, 0:3] = normalization(data[:, 0:3])
print(data)
# Cast once so downstream TF/Keras code receives float32.
data = data.astype('float32')
# Features are columns 0-2, target is column 3.
x, y = data[:, 0:3], data[:, 3]
print(y)
print(x.shape,y.shape)
# 80/20 split; the fixed random_state makes the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(
x, y,
test_size=0.20,
random_state=50)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
# Reshape test targets into a column vector of shape (n, 1).
y_test = np.array(y_test).reshape(len(y_test),1)
print(y_test)
# AutoKeras architecture search over the tabular (structured) data.
# NOTE(review): `optimizer` is not an explicit parameter of
# StructuredDataRegressor in recent autokeras versions — it would be passed
# through **kwargs; confirm it is accepted by the installed version.
search = StructuredDataRegressor(loss='mean_squared_error',optimizer='adam')
# Train the candidate models. epochs=20000 is very high — presumably
# AutoKeras's internal early stopping ends training much sooner; verify.
search.fit(
x=x_train,y=y_train,
verbose = 1,
epochs =20000,
batch_size=16)