# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 08:56:10 2018
@author: Administrator
"""
from tensorflow.contrib import rnn
import numpy as np
import tensorflow as tf
# Load pre-extracted feature sequences (c) and integer class labels (d).
c = np.load('C:/Users/Administrator/Desktop/jm00train.npy')
d = np.load('C:/Users/Administrator/Desktop/jm00label.npy')
# BUG FIX: cast to float32 BEFORE slicing. The original cast `c` only
# after `jm00train`/`jm00test` had been taken, so the splits kept the
# file's original dtype and the cast had no effect on them.
c = np.float32(c)
jm00train = c[:140000]
jm00test = c[140000:]
jm00trainlabel = d[:140000]
jm00testlabel = d[140000:]
# One-hot encode the labels over the 7 classes. NOTE: these are TF
# Tensors, not numpy arrays — they must be materialized with sess.run
# before they can be used as feed_dict values.
jm00trainlabelonehot = tf.one_hot(jm00trainlabel, 7)
jm00testlabelonehot = tf.one_hot(jm00testlabel, 7)

n_inputs = 38    # features per time step
max_time = 50    # time steps per sequence
lstm_size = 20   # LSTM hidden units
n_classes = 7    # output classes
#batch_size=1
#n_batch=

# Placeholders: x holds [batch, max_time, n_inputs] sequences,
# y holds one-hot labels of shape [batch, n_classes].
x = tf.placeholder(tf.float32, [None, 50, 38])
y = tf.placeholder(tf.float32, [None, 7])
# Weights of the final fully-connected layer mapping the last LSTM
# hidden state to class scores.
weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes], stddev=0.1))
# Initialize the biases to a small positive constant.
biases = tf.Variable(tf.constant(0.1, shape=[n_classes]))
# Define the RNN network
def RNN(X, weights, biases):
    """Run the input sequences through an LSTM and return class logits.

    Args:
        X: float32 tensor reshapeable to [batch, max_time, n_inputs].
        weights: [lstm_size, n_classes] output-projection matrix.
        biases: [n_classes] output bias vector.

    Returns:
        Raw (un-normalized) logits of shape [batch, n_classes].
    """
    # inputs: [batch_size, max_time, n_inputs]
    inputs = tf.reshape(X, [-1, max_time, n_inputs])
    # Basic LSTM cell with `lstm_size` hidden units.
    lstm_cell = rnn.BasicLSTMCell(lstm_size)
    # final_state[0] is the cell state, final_state[1] is the hidden state.
    outputs, final_state = tf.nn.dynamic_rnn(lstm_cell, inputs, dtype=tf.float32)
    # BUG FIX: return raw logits instead of tf.nn.softmax(...). The loss
    # downstream uses softmax_cross_entropy_with_logits, which applies
    # softmax internally — applying it twice flattens the gradients and
    # degrades training. argmax-based accuracy is unaffected by the change.
    return tf.matmul(final_state[1], weights) + biases
#计算RNN的返回结果
# Build the graph: class logits from the RNN.
prediction = RNN(x, weights, biases)
# Cross-entropy loss; the op applies softmax to the logits internally.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimize with Adam.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Boolean vector: does the predicted class match the true class?
# (argmax returns the index of the largest value along axis 1)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
# Accuracy = mean of the boolean vector cast to float32.
# BUG FIX: the original line was missing its closing parenthesis.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Initialize all variables.
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    # BUG FIX: feed_dict values must be plain arrays, but the one-hot
    # labels are tf.one_hot Tensors — materialize them once up front.
    # The train step was also fed the raw integer labels, which do not
    # match y's [None, 7] shape; feed the one-hot training labels instead.
    train_onehot = sess.run(jm00trainlabelonehot)
    test_onehot = sess.run(jm00testlabelonehot)
    sess.run(train_step, feed_dict={x: jm00train, y: train_onehot})
    acc = sess.run(accuracy, feed_dict={x: jm00test, y: test_onehot})
    print("Iter " + ", Testing Accuracy= " + str(acc))