cherryztata
cxzx
采纳率0%
2019-10-12 17:42

tensorflow模型推理,两个列表串行,输出结果是第一个列表的循环,新手求教

30

tensorflow模型推理,两个列表串行,输出结果是第一个列表的循环,新手求教

from __future__ import print_function
import argparse
from datetime import datetime
import os
import sys
import time
import scipy.misc
import scipy.io as sio
import cv2
from glob import glob
import multiprocessing
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import tensorflow as tf
import numpy as np
from PIL import Image
from utils import *

# ---- Inference configuration --------------------------------------------
N_CLASSES = 20                                      # number of parsing labels
DATA_DIR = './datasets/CIHP'
LIST_PATH = './datasets/CIHP/list/val2.txt'         # image/label list for the first run
DATA_ID_LIST = './datasets/CIHP/list/val_id2.txt'   # one inference step per id line
with open(DATA_ID_LIST, 'r') as f:
    NUM_STEPS = len(f.readlines())
RESTORE_FROM = './checkpoint/CIHP_pgn'              # checkpoint directory

# Load reader.
# Queue-based input pipeline; `scp1` is kept so the same name scope can be
# re-entered later.  Last argument (coord) is None here.
with tf.name_scope("create_inputs") as scp1:
    reader = ImageReader(DATA_DIR, LIST_PATH, DATA_ID_LIST, None, False, False, False, None)
    image, label, edge_gt = reader.image, reader.label, reader.edge
    # Horizontally mirrored copy (reverse along axis 1) for flip-averaging.
    image_rev = tf.reverse(image, tf.stack([1]))
    image_list = reader.image_list

# Batch of size 2: [original, flipped].
image_batch = tf.stack([image, image_rev])
label_batch = tf.expand_dims(label, dim=0) # Add one batch dimension.
edge_gt_batch = tf.expand_dims(edge_gt, dim=0)
# Multi-scale copies of the input batch for scale-averaged inference.
h_orig, w_orig = tf.to_float(tf.shape(image_batch)[1]), tf.to_float(tf.shape(image_batch)[2])
image_batch050 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 0.50)), tf.to_int32(tf.multiply(w_orig, 0.50))]))
image_batch075 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 0.75)), tf.to_int32(tf.multiply(w_orig, 0.75))]))
image_batch125 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 1.25)), tf.to_int32(tf.multiply(w_orig, 1.25))]))
image_batch150 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 1.50)), tf.to_int32(tf.multiply(w_orig, 1.50))]))
image_batch175 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 1.75)), tf.to_int32(tf.multiply(w_orig, 1.75))]))

新建网络

# Create network.
# One PGN tower per scale, all sharing the same weights: the first
# variable_scope creates the variables (reuse=False), the others reuse them.
with tf.variable_scope('', reuse=False) as scope:
    net_100 = PGNModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
    net_050 = PGNModel({'data': image_batch050}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
    net_075 = PGNModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
    net_125 = PGNModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
    net_150 = PGNModel({'data': image_batch150}, is_training=False, n_classes=N_CLASSES)
with tf.variable_scope('', reuse=True):
    net_175 = PGNModel({'data': image_batch175}, is_training=False, n_classes=N_CLASSES)

# parsing net
# 'parsing_fc': first-stage parsing logits at each scale.
parsing_out1_050 = net_050.layers['parsing_fc']
parsing_out1_075 = net_075.layers['parsing_fc']
parsing_out1_100 = net_100.layers['parsing_fc']
parsing_out1_125 = net_125.layers['parsing_fc']
parsing_out1_150 = net_150.layers['parsing_fc']
parsing_out1_175 = net_175.layers['parsing_fc']

# 'parsing_rf_fc': refined parsing logits at each scale.
parsing_out2_050 = net_050.layers['parsing_rf_fc']
parsing_out2_075 = net_075.layers['parsing_rf_fc']
parsing_out2_100 = net_100.layers['parsing_rf_fc']
parsing_out2_125 = net_125.layers['parsing_rf_fc']
parsing_out2_150 = net_150.layers['parsing_rf_fc']
parsing_out2_175 = net_175.layers['parsing_rf_fc']

# edge net
# Edge logits -- only the four largest scales are used for the edge head.
edge_out2_100 = net_100.layers['edge_rf_fc']
edge_out2_125 = net_125.layers['edge_rf_fc']
edge_out2_150 = net_150.layers['edge_rf_fc']
edge_out2_175 = net_175.layers['edge_rf_fc']

# combine resize
# Multi-scale fusion: resize every scale's logits back to the input
# resolution and average across scales.
parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_050, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out1_075, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out1_100, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out1_125, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out1_150, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out1_175, tf.shape(image_batch)[1:3,])]), axis=0)

parsing_out2 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out2_050, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out2_075, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out2_100, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out2_125, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out2_150, tf.shape(image_batch)[1:3,]),
                                        tf.image.resize_images(parsing_out2_175, tf.shape(image_batch)[1:3,])]), axis=0)

edge_out2_100 = tf.image.resize_images(edge_out2_100, tf.shape(image_batch)[1:3,])
edge_out2_125 = tf.image.resize_images(edge_out2_125, tf.shape(image_batch)[1:3,])
edge_out2_150 = tf.image.resize_images(edge_out2_150, tf.shape(image_batch)[1:3,])
edge_out2_175 = tf.image.resize_images(edge_out2_175, tf.shape(image_batch)[1:3,])
edge_out2 = tf.reduce_mean(tf.stack([edge_out2_100, edge_out2_125, edge_out2_150, edge_out2_175]), axis=0)

# Average the two parsing heads, then split the size-2 batch:
# entry 0 ("head") is the original image, entry 1 ("tail") the flipped copy.
raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2]), axis=0)
head_output, tail_output = tf.unstack(raw_output, num=2, axis=0)
# Un-flip the flipped prediction: swap channel pairs 14<->15, 16<->17,
# 18<->19 (presumably paired left/right classes in the 20-class CIHP label
# set -- TODO confirm), then mirror spatially along axis 1.
tail_list = tf.unstack(tail_output, num=20, axis=2)
tail_list_rev = [None] * 20
for xx in range(14):
    tail_list_rev[xx] = tail_list[xx]
tail_list_rev[14] = tail_list[15]
tail_list_rev[15] = tail_list[14]
tail_list_rev[16] = tail_list[17]
tail_list_rev[17] = tail_list[16]
tail_list_rev[18] = tail_list[19]
tail_list_rev[19] = tail_list[18]
tail_output_rev = tf.stack(tail_list_rev, axis=2)
tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1]))

# Flip-averaged parsing result; channel argmax gives the label map,
# channel max the per-pixel confidence.
raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
raw_output_all = tf.expand_dims(raw_output_all, dim=0)
pred_scores = tf.reduce_max(raw_output_all, axis=3)
raw_output_all = tf.argmax(raw_output_all, axis=3)
pred_all = tf.expand_dims(raw_output_all, dim=3) # Create 4-d tensor.

# Same flip-averaging for the edge head; sigmoid probability thresholded
# at 0.5 gives the binary edge map.
raw_edge = tf.reduce_mean(tf.stack([edge_out2]), axis=0)
head_output, tail_output = tf.unstack(raw_edge, num=2, axis=0)
tail_output_rev = tf.reverse(tail_output, tf.stack([1]))
raw_edge_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0)
raw_edge_all = tf.expand_dims(raw_edge_all, dim=0)
pred_edge = tf.sigmoid(raw_edge_all)
res_edge = tf.cast(tf.greater(pred_edge, 0.5), tf.int32)

# prepare ground truth 
# Flatten predictions and labels for the streaming metrics; pixels whose
# label is >= N_CLASSES are masked out via `weights`.
preds = tf.reshape(pred_all, [-1,])
gt = tf.reshape(label_batch, [-1,])
weights = tf.cast(tf.less_equal(gt, N_CLASSES - 1), tf.int32) # Ignoring all labels greater than or equal to n_classes.
mIoU, update_op_iou = tf.contrib.metrics.streaming_mean_iou(preds, gt, num_classes=N_CLASSES, weights=weights)
macc, update_op_acc = tf.contrib.metrics.streaming_accuracy(preds, gt, weights=weights)

# # Which variables to load.
# restore_var = tf.global_variables()
# # Set up tf session and initialize variables. 
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True

# # gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
# # config=tf.ConfigProto(gpu_options=gpu_options)
# init = tf.global_variables_initializer()

# evaluate prosessing
# Directory where the '<id>_vis.png' parsing visualisations are written.
parsing_dir = './output'

# Set up tf session and initialize variables. 
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grow GPU memory on demand

以上是初始化网络和初始化参数载入模型,下面定义两个函数分别处理val1.txt和val2.txt两个列表内部的数据。

# Process the first list (val2.txt, wired into the graph at module level).
def humanParsing1():
    """Restore the checkpoint and run inference over the module-level
    reader's image list, saving one '<id>_vis.png' per image."""
    restore_var = tf.global_variables()
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        sess.run(init)
        sess.run(tf.local_variables_initializer())

        # Restore the trained weights.
        saver = tf.train.Saver(var_list=restore_var)
        if RESTORE_FROM is not None:
            ok = load(saver, sess, RESTORE_FROM)
            print(" [*] Load SUCCESS" if ok else " [!] Load failed...")

        # Coordinator plus the queue-runner threads feeding the pipeline.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        # One step per image id.
        for step in range(NUM_STEPS):
            parsing_, scores, edge_ = sess.run([pred_all, pred_scores, pred_edge])  # , update_op
            print('step {:d}'.format(step))
            print(image_list[step])
            # Basename without the 4-char extension, e.g. '.jpg'.
            img_id = image_list[step].split('/')[-1][:-4]

            msk = decode_labels(parsing_, num_classes=N_CLASSES)
            Image.fromarray(msk[0]).save('{}/{}_vis.png'.format(parsing_dir, img_id))

        coord.request_stop()
        coord.join(threads)


# Process the second list (val1.txt / val_id1.txt).
def humanParsing2():
    """Restore the checkpoint and iterate over the second image list.

    NOTE(review): this function builds a *new* ImageReader for val1.txt, but
    the fetched tensors (pred_all, pred_scores, pred_edge) were constructed
    at module level from the FIRST reader's queue.  The reader created here
    is a second, disconnected input pipeline; sess.run() still dequeues from
    the first reader -- which is why the output keeps cycling through the
    first list.  A real fix must feed the inference graph from the new
    reader (e.g. rebuild the whole graph in a fresh tf.Graph per list, or
    drive the model through a tf.placeholder + feed_dict).
    """
    # Session config: grow GPU memory on demand.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Which variables to load.
    restore_var = tf.global_variables()
    init = tf.global_variables_initializer()

    with tf.Session(config=config) as sess:
        # Create queue coordinator.
        coord = tf.train.Coordinator()

        sess.run(init)
        sess.run(tf.local_variables_initializer())

        # Load weights exactly once (the original restored the same
        # checkpoint a second time after creating the reader; that second
        # restore was redundant and has been removed).
        loader = tf.train.Saver(var_list=restore_var)
        if RESTORE_FROM is not None:
            if load(loader, sess, RESTORE_FROM):
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")

        # Second validation list; these locals shadow the module globals.
        LIST_PATH = './datasets/CIHP/list/val1.txt'
        DATA_ID_LIST = './datasets/CIHP/list/val_id1.txt'
        with open(DATA_ID_LIST, 'r') as f:
            NUM_STEPS = len(f.readlines())

        # Re-enter the original input name scope.  The reader built here is
        # NOT connected to the fetched tensors (see docstring).
        with tf.name_scope(scp1):
            tf.get_variable_scope().reuse_variables()
            reader = ImageReader(DATA_DIR, LIST_PATH, DATA_ID_LIST, None, False, False, False, coord)
            image, label, edge_gt = reader.image, reader.label, reader.edge
            image_rev = tf.reverse(image, tf.stack([1]))
            image_list = reader.image_list

        # Start queue threads (this starts every queue runner in the graph,
        # including the first reader's).
        threads = tf.train.start_queue_runners(coord=coord, sess=sess)

        # Iterate over inference steps.
        for step in range(NUM_STEPS):
            parsing_, scores, edge_ = sess.run([pred_all, pred_scores, pred_edge])  # , update_op
            print('step {:d}'.format(step))
            print(image_list[step])
            img_split = image_list[step].split('/')
            img_id = img_split[-1][:-4]

            msk = decode_labels(parsing_, num_classes=N_CLASSES)
            parsing_im = Image.fromarray(msk[0])
            parsing_im.save('{}/{}_vis.png'.format(parsing_dir, img_id))

        # Stop and join the queue threads while the session is still open;
        # the original did this after the `with` block had already closed
        # the session, which can raise on the still-running threads.
        coord.request_stop()
        coord.join(threads)


if __name__ == '__main__':
    # Process the two validation lists sequentially in the same process.
    humanParsing1()  # val2.txt list (wired into the graph at module level)
    humanParsing2()  # val1.txt list


最终输出结果一直是第一个列表里面的循环,代码上用了
self.queue = tf.train.slice_input_producer([self.images, self.labels, self.edges], shuffle=shuffle),队列的方式进行多线程推理。最终得到的结果一直是第一个列表的循环,求大神告诉问题怎么解决。

  • 点赞
  • 写回答
  • 关注问题
  • 收藏
  • 复制链接分享
  • 邀请回答

1条回答

  • dashicaineng 蔡能教授,网站特聘专家 2年前

    import tensorflow as tf
    a = tf.constant(1)
    b = tf.constant(2)
    c = tf.constant(3)

    def cond(a, b, c):
        return a < 5

    def body(a, b, c):
        a += 1
        b += 1
        c += 1
        return a, b, c  # same with [a, b, c]

    a, b, c = tf.while_loop(cond, body, [a, b, c])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run([a, b, c]))  # [5, 6, 7]

    点赞 评论 复制链接分享