m0_51139945 2022-05-02 15:26 采纳率: 50%
浏览 104
已结题

蹲人脸表情识别的代码资源

最近要做一个人脸表情识别的项目
请问各位有没有什么比较好的模型推荐(要可以训练的)?最好还有配套的论文,感谢了!

  • 写回答

4条回答 默认 最新

  • 最笨的羊羊 大数据领域新星创作者 2022-05-05 13:44
    关注

    基于FER2013数据集做的表情识别,先训练模型,再验证模型表情识别的准确率

    
    from __future__ import division, print_function, absolute_import
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d
    from tflearn.layers.normalization import local_response_normalization
    from tflearn.layers.merge_ops import merge
    from tflearn.layers.estimator import regression
    import tflearn
    import tensorflow as tf
    from PIL import Image
    import numpy as np
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.normalization import local_response_normalization
    from tflearn.layers.estimator import regression
    from sklearn.model_selection import train_test_split
    # from sklearn.cross_validation import train_test_split
    from tflearn.layers.normalization import batch_normalization
    from tflearn.layers.merge_ops import merge
    
    #n = 5
    #X = np.load("C:/Users/ivylab/Desktop/FFF/image1.npy")/255
    #Y = np.load("C:/Users/ivylab/Desktop/FFF/labels1.npy")
    X = np.load(r"D:\FER2013\aaa\ffff\8-8/fer-mouth.npy")/255
    Y = np.load(r"D:\FER2013\aaa\ffff\8-8/fer-mouth-label.npy")
    X = X[:, :, :, np.newaxis]
    # print(labels)
    #X, X0, Y, Y0 = train_test_split(image / 255, labels, test_size=0)
    #X = X[:, :, :, np.newaxis]
    
    
    def net():
        """Build the 2-class CNN graph used for each training fold.

        Returns a TFLearn ``regression`` op over a small conv net:
        48x48x1 input -> two conv/pool/batch-norm stages -> 1x1 conv ->
        dropout -> 2-way softmax, trained with momentum (exponential LR
        decay, step 2000) and categorical cross-entropy.
        """
        # NOTE(review): this preprocessing object is built but never passed to
        # input_data(), so it has no effect; kept for parity with the original.
        img_prep = tflearn.ImagePreprocessing()
        img_prep.add_featurewise_zero_center()
        img_prep.add_featurewise_stdnorm()

        # Single-channel 48x48 face crop (FER2013-style input).
        network = input_data(shape=[None, 48, 48, 1])

        # Stage 1: 3x3 conv (64 filters) -> 3x3/2 max-pool -> batch norm.
        network = conv_2d(network, 64, 3, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = batch_normalization(network)
        # Stage 2: 3x3 conv (96 filters) -> 3x3/2 max-pool -> batch norm.
        network = conv_2d(network, 96, 3, activation='relu')
        network = max_pool_2d(network, 3, strides=2)
        network = batch_normalization(network)
        # 1x1 conv as a cheap channel mixer before the classifier head.
        network = conv_2d(network, 96, 1, activation='relu')
        network = dropout(network, 0.5)
        # restore=False: skip this layer when restoring weights from a
        # checkpoint, so a fresh 2-class head can sit on pre-trained layers.
        network = fully_connected(network, 2, activation='softmax', restore=False)
        network = regression(network,
                             optimizer=tflearn.optimizers.Momentum(lr_decay=0.96, decay_step=2000),
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
        return network
    
    
    
    # ---- Training: 4-fold hold-out evaluation ----
    # Each fold i holds out samples [1000*i, 1000*i + 1000) as its test split,
    # trains a fresh network for 100 single-epoch fit() calls, and dumps the
    # splits, per-epoch accuracies, checkpoints and predictions to disk.
    # NOTE(review): max_test_acu is shared across ALL four folds, so a later
    # fold only checkpoints when it beats the global best so far — confirm
    # this is intended rather than a per-fold best.
    max_test_acu = 0
    test_all = []   # per-fold lists of 100 test accuracies
    train_all = []  # per-fold lists of 100 train accuracies
    ss = []         # best test accuracy reached in each fold

    for i in range(4):
        prediction = []
        # Hold-out slice for this fold; both splits are saved for later reuse.
        test_X = X[1000 * i:1000 * i + 1000, :, :, :]
        np.save(r'D:\FER2013\aaa\ffff\8-8/' + "test" + str(i) + "-" + "img", test_X)
        test_Y = Y[1000 * i:1000 * i + 1000, :]
        np.save(r'D:\FER2013\aaa\ffff\8-8/' + "test" + str(i) + "-" + "label", test_Y)
        train_X = np.delete(X, range(1000 * i, 1000 * i + 1000), axis=0)
        np.save(r'D:\FER2013\aaa\ffff\8-8/' + "train" + str(i) + "-" + "img", train_X)
        train_Y = np.delete(Y, range(1000 * i, 1000 * i + 1000), axis=0)
        np.save(r'D:\FER2013\aaa\ffff\8-8/' + "train" + str(i) + "-" + "label", train_Y)
        test_acu = []
        train_acu = []
        # Drop the previous fold's graph before building a fresh one
        # (TF1-style global default graph).
        tf.reset_default_graph()
        network = net()
        model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                            max_checkpoints=1, tensorboard_verbose=0)
    #    model.load("D:/train/fei/yu_Model.tfl")
        # 100 passes of exactly one epoch each so accuracy can be measured
        # after every epoch.
        for j in range(100):
            print("********")
            print(i)
            model.fit(train_X, train_Y, n_epoch=1, shuffle=True,
                      show_metric=True, batch_size=8, snapshot_step=1000,
                      snapshot_epoch=False, run_id='alexnet_fer2013')
            acu_train = model.evaluate(train_X, train_Y)
            acu_test = model.evaluate(test_X, test_Y)
            train_acu.append(acu_train[0])
            test_acu.append(acu_test[0])
            #        for index in range(len(test_X)):
            #            test_X1=test_X[index]
            #            test_X2=test_X1.reshape(1,48,48,1)
            #            a=model.predict(test_X2)
            #            prediction.append(a)
            #            prediction1=np.array(prediction)
            #            np.save('D:/toupiao'+str(i)+"-prediction1.npy",prediction1)
            ##                for index in range(len(test_X)):
            ##            test_X=test_X[index:index+1,:,:,:]
            #            test_X1=test_X[index]
            ##            test_X1=test_X[0]
            #            test_X2=test_X1.reshape(1,48,48,1)
            #            a=model.predict(test_X2)
            #
            #
            #            prediction.append(a)
            # Checkpoint and dump per-sample predictions whenever the global
            # best test accuracy improves.
            # NOTE(review): `prediction` is only cleared once per fold, so the
            # saved file accumulates predictions from every improving epoch of
            # the fold, and np.save rewrites the file once per sample inside
            # the loop — verify both are intended.
            if acu_test[0] > max_test_acu:
                max_test_acu = acu_test[0]
                model.save(r'D:\FER2013\aaa\ffff\8-8/' + str(j) + "-" + str(i) + "new_model.tfl")
                for index in range(len(test_X)):
                    test_X1 = test_X[index]
                    test_X2 = test_X1.reshape(1, 48, 48, 1)
                    a = model.predict(test_X2)
                    prediction.append(a)
                    prediction1 = np.array(prediction)
                    np.save(r'D:\FER2013\aaa\ffff\8-8/' + str(i) + "-prediction1.npy", prediction1)
            else:
                continue
                #    for index in range(len(test_X)):
                #        test_X1=test_X[index]
                #        test_X2=test_X1.reshape(1,48,48,1)
                #        a=model.predict(test_X2)
                #        prediction.append(a)
                #        prediction1=np.array(prediction)
                #        np.save('D:/tou_piao/'+str(i)+"-prediction1.npy",prediction1)

                #    if(acu_test>max_test_acu):
                #        model.save("D:/train/aa/"+str(i)+"-new_model.tfl")

        test_all.append(test_acu)
        train_all.append(train_acu)
    # Accuracy curves as (folds, epochs) = (4, 100) arrays, saved for plotting.
    aaa = np.array(test_all).reshape(4, 100)
    bbb = np.array(train_all).reshape(4, 100)
    np.save(r'D:\FER2013\aaa\ffff\8-8/test_all.npy', aaa)
    np.save(r'D:\FER2013\aaa\ffff\8-8/train_all.npy', bbb)
    # Average of each fold's best test accuracy (printed as avg / 4 below).
    s = 0
    avg = 0
    for m in range(4):
        avg = avg + max(aaa[m])
        s = max(aaa[m])
        ss.append(s)
    ss = np.array(ss)
    np.save(r'D:\FER2013\aaa\ffff\8-8/test.npy', ss)
    # np.save('x_shuffle.npy',X)
    # np.save('y_shuffle.npy',Y)
    print("*********avg")
    print(avg / 4)
    print("**^^^")
    print(ss)
    # result_all=np.vstack((result1,result2,result3,result4,result5,result6,result7,result8,result9,result10))
    print('fcn_Done!')
    # NOTE(review): this saves only the LAST fold's final model, not the best
    # checkpoint saved above — confirm that is intended.
    model.save(r"D:\FER2013\aaa\ffff\8-8/FCN.tfl")
    
    
    本回答被题主选为最佳回答 , 对您是否有帮助呢?
    评论
查看更多回答(3条)

报告相同问题?

问题事件

  • 系统已结题 5月17日
  • 已采纳回答 5月9日
  • 创建了问题 5月2日

悬赏问题

  • ¥100 set_link_state
  • ¥15 虚幻5 UE美术毛发渲染
  • ¥15 CVRP 图论 物流运输优化
  • ¥15 Tableau online 嵌入ppt失败
  • ¥100 支付宝网页转账系统不识别账号
  • ¥15 基于单片机的靶位控制系统
  • ¥15 真我手机蓝牙传输进度消息被关闭了,怎么打开?(关键词-消息通知)
  • ¥15 装 pytorch 的时候出了好多问题,遇到这种情况怎么处理?
  • ¥20 IOS游览器某宝手机网页版自动立即购买JavaScript脚本
  • ¥15 手机接入宽带网线,如何释放宽带全部速度