zoe9698 2019-05-13 10:46 · acceptance rate: 0%
1627 views

When training an image classifier with a CNN in TensorFlow, the model fails to learn: accuracy stays at 0.1 (there are ten classes), so it is effectively guessing. What could be the reason?

# imports implied by the snippet (read_tfrecords comes from the asker's own code)
import time

import numpy as np
import tensorflow as tf


def compute_accuracy(v_xs, v_ys):
    global prediction
    y_pre = sess.run(prediction, feed_dict={xs: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre, 1), tf.argmax(v_ys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    result = sess.run(accuracy, feed_dict={xs: v_xs, ys: v_ys, keep_prob: 1})
    return result
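
# note: compute_accuracy is never actually called below; it also builds new
# tf.argmax / tf.equal / tf.reduce_mean ops on every call, so using it inside a
# loop would keep growing the graph. The accuracy tensor defined once further
# down serves the same purpose.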

def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    # stride [1, x_movement, y_movement, 1]
    # Must have strides[0] = strides[3] = 1
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    # note: despite the name, this pools with a 4x4 window and a stride of 4
    return tf.nn.max_pool(x, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME')


# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 65536])/255.   # 256x256
ys = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
x_image = tf.reshape(xs, [-1, 256, 256, 1])
# print(x_image.shape)  # [n_samples, 256, 256, 1]


## conv1 layer ##
W_conv1 = weight_variable([3,3, 1,64]) # patch 3x3, in channels 1, out channels 64
b_conv1 = bias_variable([64])
h_conv1 = tf.nn.elu(conv2d(x_image, W_conv1) + b_conv1) # output size 256x256x64
h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') # 2x2 pool -> output size 128x128x64

## conv2 layer ##
W_conv2 = weight_variable([3,3, 64, 128]) # patch 3x3, in channels 64, out channels 128
b_conv2 = bias_variable([128])
h_conv2 = tf.nn.elu(conv2d(h_pool1, W_conv2) + b_conv2) # output size 128x128x128
h_pool2 = max_pool_2x2(h_conv2) # 4x4 pool -> output size 32x32x128

## conv3 layer ##
W_conv3 = weight_variable([3,3, 128, 256]) # patch 3x3, in channels 128, out channels 256
b_conv3 = bias_variable([256])
h_conv3 = tf.nn.elu(conv2d(h_pool2, W_conv3) + b_conv3) # output size 32x32x256
h_pool3 = max_pool_2x2(h_conv3) # 4x4 pool -> output size 8x8x256

## conv4 layer ##
W_conv4 = weight_variable([3,3, 256, 512]) # patch 3x3, in channels 256, out channels 512
b_conv4 = bias_variable([512])
h_conv4 = tf.nn.elu(conv2d(h_pool3, W_conv4) + b_conv4) # output size 8x8x512
h_pool4 = max_pool_2x2(h_conv4) # 4x4 pool -> output size 2x2x512

# ## conv5 layer (disabled) ##
# W_conv5 = weight_variable([3,3, 512, 512]) # patch 3x3, in channels 512, out channels 512
# b_conv5 = bias_variable([512])
# h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5)
# h_pool5 = max_pool_2x2(h_conv5)


## fc1 layer ##
W_fc1 = weight_variable([2*2*512, 128])
b_fc1 = bias_variable([128])
# [n_samples, 2, 2, 512] ->> [n_samples, 2*2*512]
h_pool4_flat = tf.reshape(h_pool4, [-1, 2*2*512])
h_fc1 = tf.nn.elu(tf.matmul(h_pool4_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

## fc2 layer ##
W_fc2 = weight_variable([128, 10])
b_fc2 = bias_variable([10])
prediction = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)



# define loss, optimizer and training op
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=prediction))
train_step = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
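
# editor's note: prediction above is already the output of tf.nn.softmax, while
# tf.nn.softmax_cross_entropy_with_logits expects raw, unnormalized logits, so
# the softmax is effectively applied twice here -- a common cause of accuracy
# sticking at chance level. See the sketch at the end of the thread.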



# saver used to save and restore the model
saver = tf.train.Saver()

def int2onehot(train_batch_ys):  
    num_labels = train_batch_ys.shape[0]
    num_classes=10
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes),dtype=np.float32)
    labels_one_hot.flat[index_offset + train_batch_ys.ravel()] = 1
    return labels_one_hot
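
# editor's example: int2onehot(np.array([3, 0, 9])) returns a (3, 10) float32
# array with a single 1 at column 3, 0 and 9 of the respective rows.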

train_label_lists, train_data_lists, train_fname_lists = read_tfrecords(train_tfrecord_file) 
iterations = 100
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # run the training iterations
    for it in range(iterations):
        # the key point here is to convert the input arrays to np.array
        for i in range(200):
            train_label_list = train_label_lists[i]
            train_data_list= train_data_lists[i]
            train_name_list = train_fname_lists[i]
            #print("shape of train_data_list: {}\tshape of train_label_list: {}".format(train_data_list.shape, train_label_list.shape)) 


            #print('batch file names:', train_name_list)
            print('batch labels:', train_label_list)
            # count how many classes of images there are
            #num_classes = len(set(train_label_list))
            #print("num_classes:",num_classes)


            train_batch_xs = train_data_list
            train_batch_xs = np.reshape(train_batch_xs, (-1, 65536))
            train_batch_ys = train_label_list
            train_batch_ys = int2onehot(train_batch_ys)

            #print('batch ' + str(i) + ' -----------')
            print("after fc1 layer ----------------------------------------")
            for i in range(80):
                print("element " + str(i) + ":", sess.run(tf.reduce_mean(sess.run(h_fc1_drop, feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})[i].shape)))
                print("element " + str(i) + ":", sess.run(h_fc1_drop, feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})[i])

            print("after fc2 layer ----------------------------------------")
            for i in range(80):
                print("element " + str(i) + ":", sess.run(tf.reduce_mean(sess.run(prediction, feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})[i].shape)))
                print("element " + str(i) + ":", sess.run(prediction, feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})[i])


            #loss.run(feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})
            train_step.run(feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})
            time.sleep(7)

        # every 5 iterations, check whether accuracy has reached 100%; if so, stop training
        iterate_accuracy = 0
        if it%5 == 0:
            iterate_accuracy = accuracy.eval(feed_dict={xs: train_batch_xs, ys: train_batch_ys, keep_prob: 0.5})
            print('iteration %d: accuracy %s' % (it, iterate_accuracy))
            if iterate_accuracy >= 1:
                break

    print('Training finished!')



1 answer

  • CSDN-Ada Assistant (CSDN-AI official account) 2022-10-25 19:28
    I don't know whether this problem has already been solved. If it has not:

    If you have already solved it, it would be great if you could write up the solution as a blog post and leave the link in the comments, to help more people ^-^
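
For reference, the symptom described in the question (accuracy stuck at 0.1 over ten classes, i.e. chance level) is most often caused by applying softmax twice: prediction is already the output of tf.nn.softmax, yet it is then passed to tf.nn.softmax_cross_entropy_with_logits, which expects raw, unnormalized logits. The doubled softmax flattens the gradients and the network barely learns. A minimal sketch of the usual restructuring, keeping the rest of the graph as posted and the same xs / ys / keep_prob placeholders:

    # keep the raw logits; apply softmax only to get probabilities for inspection
    logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    prediction = tf.nn.softmax(logits)

    # cross-entropy computed from the raw logits, not from the softmaxed output
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))
    train_step = tf.train.RMSPropOptimizer(1e-3).minimize(loss)

Two further things worth checking if that alone does not help: the xs placeholder already divides by 255, so the data fed in should be raw 0-255 pixel values rather than already-normalized images; and the training loop evaluates accuracy with keep_prob: 0.5, which leaves dropout active at evaluation time -- use keep_prob: 1.0 there.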
