jggybf 2019-11-05 12:11 · Acceptance rate: 0%
Views: 2491

fashion_mnist recognition accuracy question

What test accuracy do people typically get on fashion_mnist? I see a lot of people reporting around 92%, but one of my networks reached 94%. Could anyone who has worked on this say what they actually got?
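For a reference point, here is a minimal Keras baseline (a sketch only, assuming the tf.keras Sequential API; the layer sizes and epoch count are arbitrary choices, not tuned values). A plain fully connected net like this is commonly reported around 88–90% on the test set, while the ~92% figures usually come from small convolutional networks.

# Minimal Keras MLP baseline for comparison (hypothetical sketch, not the code below)
from tensorflow import keras

(x_tr, y_tr), (x_te, y_te) = keras.datasets.fashion_mnist.load_data()
x_tr, x_te = x_tr / 255.0, x_te / 255.0   # scale pixels to [0, 1]

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(100, activation='relu'),
    keras.layers.Dense(50, activation='relu'),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',  # integer labels, no one-hot needed
              metrics=['accuracy'])
model.fit(x_tr, y_tr, epochs=10, validation_data=(x_te, y_te))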

# Here is a sample of my results
x_shape: (60000, 28, 28)
y_shape: (60000,)
epoches:  0 val_acc:  0.4991 train_acc 0.50481665
epoches:  1 val_acc:  0.6765 train_acc 0.66735
epoches:  2 val_acc:  0.755 train_acc 0.7474
epoches:  3 val_acc:  0.7846 train_acc 0.77915
epoches:  4 val_acc:  0.798 train_acc 0.7936
epoches:  5 val_acc:  0.8082 train_acc 0.80365
epoches:  6 val_acc:  0.8146 train_acc 0.8107
epoches:  7 val_acc:  0.8872 train_acc 0.8872333
epoches:  8 val_acc:  0.896 train_acc 0.89348334
epoches:  9 val_acc:  0.9007 train_acc 0.8986
epoches:  10 val_acc:  0.9055 train_acc 0.90243334
epoches:  11 val_acc:  0.909 train_acc 0.9058833
epoches:  12 val_acc:  0.9112 train_acc 0.90868336
epoches:  13 val_acc:  0.9126 train_acc 0.91108334
epoches:  14 val_acc:  0.9151 train_acc 0.9139
epoches:  15 val_acc:  0.9172 train_acc 0.91595
epoches:  16 val_acc:  0.9191 train_acc 0.91798335
epoches:  17 val_acc:  0.9204 train_acc 0.91975
epoches:  18 val_acc:  0.9217 train_acc 0.9220333
epoches:  19 val_acc:  0.9252 train_acc 0.9234667
epoches:  20 val_acc:  0.9259 train_acc 0.92515
epoches:  21 val_acc:  0.9281 train_acc 0.9266667
epoches:  22 val_acc:  0.9289 train_acc 0.92826664
epoches:  23 val_acc:  0.9301 train_acc 0.93005
epoches:  24 val_acc:  0.9315 train_acc 0.93126667
epoches:  25 val_acc:  0.9322 train_acc 0.9328
epoches:  26 val_acc:  0.9331 train_acc 0.9339667
epoches:  27 val_acc:  0.9342 train_acc 0.93523335
epoches:  28 val_acc:  0.9353 train_acc 0.93665
epoches:  29 val_acc:  0.9365 train_acc 0.9379333
epoches:  30 val_acc:  0.9369 train_acc 0.93885
epoches:  31 val_acc:  0.9387 train_acc 0.9399
epoches:  32 val_acc:  0.9395 train_acc 0.9409
epoches:  33 val_acc:  0.94 train_acc 0.9417667
epoches:  34 val_acc:  0.9403 train_acc 0.94271666
epoches:  35 val_acc:  0.9409 train_acc 0.9435167
epoches:  36 val_acc:  0.9418 train_acc 0.94443333
epoches:  37 val_acc:  0.942 train_acc 0.94515
epoches:  38 val_acc:  0.9432 train_acc 0.9460667
epoches:  39 val_acc:  0.9443 train_acc 0.9468833
epoches:  40 val_acc:  0.9445 train_acc 0.94741666
epoches:  41 val_acc:  0.9462 train_acc 0.9482
epoches:  42 val_acc:  0.947 train_acc 0.94893336
epoches:  43 val_acc:  0.9472 train_acc 0.94946665
epoches:  44 val_acc:  0.948 train_acc 0.95028335
epoches:  45 val_acc:  0.9486 train_acc 0.95095
epoches:  46 val_acc:  0.9488 train_acc 0.9515833
epoches:  47 val_acc:  0.9492 train_acc 0.95213336
epoches:  48 val_acc:  0.9495 train_acc 0.9529833
epoches:  49 val_acc:  0.9498 train_acc 0.9537
val_acc:  0.9498


# TensorFlow 1.x graph-mode API (tf.placeholder / tf.Session)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt

def to_onehot(y, num):
    # Build a (num, len(y)) matrix with a 1 at [class, sample], then transpose
    # so each row is the one-hot vector for one sample
    labels = np.zeros([num, len(y)])
    for i in range(len(y)):
        labels[y[i], i] = 1
    return labels.T
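Aside: the loop above can be replaced by a single vectorized NumPy expression (to_onehot_vec is a hypothetical name, not part of the original script):

def to_onehot_vec(y, num):
    # Row y[i] of the identity matrix is the one-hot encoding of label y[i]
    return np.eye(num)[y]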

# Preprocess the data
mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

print('x_shape:', train_images.shape)
# (60000, 28, 28)
print('y_shape:', train_labels.shape)
# (60000,)

# Flatten 28x28 images to 784-dim vectors and scale pixels to [0, 1]
X_train = train_images.reshape((-1, train_images.shape[1] * train_images.shape[2])) / 255.0
Y_train = to_onehot(train_labels, 10)
X_test = test_images.reshape((-1, test_images.shape[1] * test_images.shape[2])) / 255.0
Y_test = to_onehot(test_labels, 10)

# A network with two hidden layers
input_nodes = 784
output_nodes = 10
layer1_nodes = 100
layer2_nodes = 50
batch_size = 100
regularization_rate = 0.0000001
epochs = 50
learning_rate = 0.005


def train():
    X = tf.placeholder(tf.float32, [None, input_nodes], name="input_x")
    Y = tf.placeholder(tf.float32, [None, output_nodes], name="y_true")
    w1 = tf.Variable(tf.truncated_normal([input_nodes, layer1_nodes], stddev=0.1))
    b1 = tf.Variable(tf.constant(0.1, shape=[layer1_nodes]))
    w2 = tf.Variable(tf.truncated_normal([layer1_nodes, layer2_nodes], stddev=0.1))
    b2 = tf.Variable(tf.constant(0.1, shape=[layer2_nodes]))
    w3 = tf.Variable(tf.truncated_normal([layer2_nodes, output_nodes], stddev=0.1))
    b3 = tf.Variable(tf.constant(0.1, shape=[output_nodes]))

    layer1 = tf.nn.relu(tf.matmul(X, w1) + b1)
    A2 = tf.nn.relu(tf.matmul(layer1, w2) + b2)
    # Output layer: raw logits, no ReLU -- a ReLU here would clip negative
    # logits to zero before the softmax
    A3 = tf.matmul(A2, w3) + b3

    y_hat = tf.nn.softmax(A3)

    # softmax_cross_entropy_with_logits_v2 applies the softmax internally,
    # so it takes the raw logits A3, not y_hat
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=A3, labels=Y))
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    # The regularizer already scales by regularization_rate, so it is not
    # multiplied in again here
    regularization = regularizer(w1) + regularizer(w2) + regularizer(w3)
    loss = cross_entropy + regularization

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    total_loss = []
    val_acc = []
    total_train_acc = []
    x_axis = []

    with tf.Session() as sess:
        tf.global_variables_initializer().run()

        # Number of minibatches per epoch (ceiling division)
        batchs = int(np.ceil(X_train.shape[0] / batch_size))
        for i in range(epochs):
            loss_e = 0.
            for j in range(batchs):
                # The original slice [j*batch_size : j*(batch_size+1)] was a bug:
                # it produced wrongly sized (and sometimes empty) batches
                batch_x = X_train[j * batch_size:(j + 1) * batch_size, :]
                batch_y = Y_train[j * batch_size:(j + 1) * batch_size, :]
                sess.run(train_step, feed_dict={X: batch_x, Y: batch_y})
                loss_e += sess.run(loss, feed_dict={X: batch_x, Y: batch_y})
            # Note: the test set is used here as the validation set
            validate_acc = sess.run(accuracy, feed_dict={X: X_test, Y: Y_test})
            train_acc = sess.run(accuracy, feed_dict={X: X_train, Y: Y_train})
            print("epoches: ", i, "val_acc: ", validate_acc, "train_acc", train_acc)
            total_loss.append(loss_e / batchs)  # mean loss per minibatch
            val_acc.append(validate_acc)
            total_train_acc.append(train_acc)
            x_axis.append(i)
        validate_acc = sess.run(accuracy, feed_dict={X: X_test, Y: Y_test})
        print("val_acc: ", validate_acc)
    return (x_axis, total_loss, total_train_acc, val_acc)

result = train()

def plot_acc(total_train_acc, val_acc, x):
    # Plot training and validation accuracy against epoch index
    plt.figure()
    plt.plot(x, total_train_acc, '--', color="red", label="train_acc")
    plt.plot(x, val_acc, color="green", label="val_acc")
    plt.xlabel("Epochs")
    plt.ylabel("acc")
    plt.legend()
    plt.show()
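plot_acc is defined but never invoked above; to actually draw the curves from the tuple returned by train (a usage sketch):

x, losses, train_accs, val_accs = result
plot_acc(train_accs, val_accs, x)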
