python循环中float与int的问题

def shell_sort(lists):
    """Sort *lists* in place using Shell sort and return it.

    The gap starts at len(lists) // 2 and is halved each pass; within a
    pass, a gap-insertion sort is performed on each of the `group`
    interleaved sub-sequences.

    Note: the original code used true division (`group /= step`), which
    yields a float in Python 3 and makes `range(0, group)` raise
    "'float' object cannot be interpreted as an integer".  Floor
    division (`//`) keeps the gap an int throughout.
    """
    count = len(lists)
    step = 2
    group = count // step  # initial gap; // keeps it an int (empty list -> 0, loop skipped)
    while group > 0:
        # One gap-insertion pass over each of the `group` sub-sequences.
        for i in range(group):
            j = i + group
            while j < count:
                k = j - group
                key = lists[j]
                # Sift `key` down its sub-sequence by swapping with larger elements.
                while k >= 0:
                    if lists[k] > key:
                        lists[k + group], lists[k] = lists[k], key
                    else:
                        break
                    k -= group
                j += group
        group //= step  # halve the gap with floor division so it stays an int
    return lists

    每次运行会显示'float' object cannot be interpreted as an integer错误
    请问是怎么回事

2个回答

问题出在倒数第二行的 group /= step:在 Python 3 中 / 是真除法,结果是 float,
因此下一轮循环里 for i in range(0, group): 中的 range 收到 float 参数就会报
'float' object cannot be interpreted as an integer。改成整除 group //= step 即可
(开头的 group = count/step 同理应写成 count//step)。
另外,贴出的代码为什么没有缩进?缩进丢失后无法确认循环的嵌套结构。

代码贴全了?从贴出的代码看不出问题出在哪里

Csdn user default icon
上传中...
上传图片
插入图片
抄袭、复制答案,以达到刷声望分或其他目的的行为,在CSDN问答是严格禁止的,一经发现立刻封号。是时候展现真正的技术了!
其他相关推荐
新人求助,python中if语句判断未生效
我写的python代码中有一处用if语句判断,通过追踪变量发现当if后的条件应该正确时,程序跳过了if后面的语句。求问是怎么回事? 代码: ``` for each in sol: if (isinstance(each[0],(float,int)) \ #判断第一个元素是否为数字 and each[0]>min(point1[0],point2[0]) \ and each[0]<max(point1[0],point2[0])): #x值是否在两点之间 elm=[each[0],each[1],each[2]] return elm ``` 相关截图 ![图片说明](https://img-ask.csdn.net/upload/201904/18/1555522005_614048.png) 此时VS显示的局部变量为: ![图片说明](https://img-ask.csdn.net/upload/201904/18/1555522083_207414.png) 可以看到此时each所代表的点x值在point1和point2之间,但执行下一句依然返回到了外层循环,请问是哪里出了问题?求大佬解答,在此提前感谢!!!
创建一个新的nc文件,合并存储所有现存以一小时划分的nc文件
现在有按小时划分的若干nc文件(即,所有nc文件的时间维度为1); 想要利用Python创建一个新的时间维度为小时数的nc文件去整合存储所有单独的文件; 思路如下: S1:生成指定文件夹内nc文件列表filelist; S2:创建新的nc文件ncfile; S3:用循环读取按小时划分的nc文件,并按时间先后存储其variable到新的ncfile中。 目前写到S2发生报错“AttributeError: NetCDF: Attribute not found”。 想知道需要如何修改代码去处理此报错。代码如下,谢谢大家的建议! ``` import os from netCDF4 import Dataset import numpy as np fileList = [] ##生成指定文件夹内nc文件列表fileList path = 'D:\\Sort out\\ChinaRainfall py\\hourly_FiT_2008_output' dirs = os.listdir(path) for i in dirs: if os.path.splitext(i)[1] == '.nc': fileList.append(i) #print(fileList ) filenumber = len(fileList) ##create the merged netcdf file to store the result. #New netCDF file ncfile = Dataset('Chinarain2008','w',format = 'NETCDF4_CLASSIC') #Add dimensions xdim = ncfile.createDimension('x',617) ydim = ncfile.createDimension('y',386) tdim = ncfile.createDimension('time',filenumber) #Add global attributes ncfile.addgroupsttr('title','Chinarainfall2008') ncfile.addgroupsttr('geospatial_lat_min','15 degrees') ncfile.addgroupsttr('geospatial_lat_max','60 degrees') ncfile.addgroupsttr('geospatial_lon_min','70 degrees') ncfile.addgroupsttr('geospatial_lon_min','140 degrees') #Add variables variables = [] var = ncfile.createVariable('x',np.float32,[xdim]) var.addattr('long_name','xgrid') var.addattr('units','num') variables.append(var) var = ncfile.createVariable('y',np.float32,[ydim]) var.addattr('long_name','ygrid') var.addattr('units','num') variables.append(var) tvar = ncfile.createVariable('time',np.int,[tdim]) tvar.addattr('long_name','time') tvar.addattr('units','hours since 2008-01-01 00:00:0.0') variables.append(tvar) var = ncfile.createVariable('rainfall',np.float,[tdim,ydim,xdim]) var.addattr('long_name','rainfall') var.addattr('units','mm') variables.append(var) #Create netCDF file ncfile.close() ``` 参考代码来源:https://www.cnblogs.com/yaqiang/p/4854415.html
python机器学习+=与=+
```python import numpy as np import pandas as pd import requests def getdata(url): content=requests.get(url).content content=content.decode("utf-8") content=content.split("\n") X = [] Y = [] for line in content[:-1]: x1,y1 = line.split("\t") x1 = x1.split() for i in range(len(x1)): x1[i] = float(x1[i]) X.append([1]+x1) Y.append(int(y1)) X=np.array(X) Y=np.array(Y) return X,Y def init(X): w = np.array([0]*len(X[1]),dtype=float) return w def inter_iter(X,Y,w): OK = True for i in range(len(X)): mul = np.dot(w,X[i]) if mul<0: res = -1 else : res = 1 if res != Y[i]: OK = False w += Y[i]*X[i] return OK def PLA(url): X,Y = getdata(url) w = init(X) cnt = 1 while (inter_iter(X,Y,w)!=True): cnt += 1 print(cnt) if __name__=='__main__': PLA("https://www.csie.ntu.edu.tw/~htlin/mooc/datasets/mlfound_math/hw1_15_train.dat") ``` inter iter函数中`w += Y[i]*X[i]`如果改成`w = w + Y[i]*X[i]`就会出错,刚开始数字还对,循环几次之后答案会不对了,有大佬知道为什么吗?
tensorflow模型推理,两个列表串行,输出结果是第一个列表的循环,新手求教
tensorflow模型推理,两个列表串行,输出结果是第一个列表的循环,新手求教 ``` from __future__ import print_function import argparse from datetime import datetime import os import sys import time import scipy.misc import scipy.io as sio import cv2 from glob import glob import multiprocessing os.environ["CUDA_VISIBLE_DEVICES"] = "0" import tensorflow as tf import numpy as np from PIL import Image from utils import * N_CLASSES = 20 DATA_DIR = './datasets/CIHP' LIST_PATH = './datasets/CIHP/list/val2.txt' DATA_ID_LIST = './datasets/CIHP/list/val_id2.txt' with open(DATA_ID_LIST, 'r') as f: NUM_STEPS = len(f.readlines()) RESTORE_FROM = './checkpoint/CIHP_pgn' # Load reader. with tf.name_scope("create_inputs") as scp1: reader = ImageReader(DATA_DIR, LIST_PATH, DATA_ID_LIST, None, False, False, False, None) image, label, edge_gt = reader.image, reader.label, reader.edge image_rev = tf.reverse(image, tf.stack([1])) image_list = reader.image_list image_batch = tf.stack([image, image_rev]) label_batch = tf.expand_dims(label, dim=0) # Add one batch dimension. edge_gt_batch = tf.expand_dims(edge_gt, dim=0) h_orig, w_orig = tf.to_float(tf.shape(image_batch)[1]), tf.to_float(tf.shape(image_batch)[2]) image_batch050 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 0.50)), tf.to_int32(tf.multiply(w_orig, 0.50))])) image_batch075 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 0.75)), tf.to_int32(tf.multiply(w_orig, 0.75))])) image_batch125 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 1.25)), tf.to_int32(tf.multiply(w_orig, 1.25))])) image_batch150 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 1.50)), tf.to_int32(tf.multiply(w_orig, 1.50))])) image_batch175 = tf.image.resize_images(image_batch, tf.stack([tf.to_int32(tf.multiply(h_orig, 1.75)), tf.to_int32(tf.multiply(w_orig, 1.75))])) ``` 新建网络 ``` # Create network. 
with tf.variable_scope('', reuse=False) as scope: net_100 = PGNModel({'data': image_batch}, is_training=False, n_classes=N_CLASSES) with tf.variable_scope('', reuse=True): net_050 = PGNModel({'data': image_batch050}, is_training=False, n_classes=N_CLASSES) with tf.variable_scope('', reuse=True): net_075 = PGNModel({'data': image_batch075}, is_training=False, n_classes=N_CLASSES) with tf.variable_scope('', reuse=True): net_125 = PGNModel({'data': image_batch125}, is_training=False, n_classes=N_CLASSES) with tf.variable_scope('', reuse=True): net_150 = PGNModel({'data': image_batch150}, is_training=False, n_classes=N_CLASSES) with tf.variable_scope('', reuse=True): net_175 = PGNModel({'data': image_batch175}, is_training=False, n_classes=N_CLASSES) # parsing net parsing_out1_050 = net_050.layers['parsing_fc'] parsing_out1_075 = net_075.layers['parsing_fc'] parsing_out1_100 = net_100.layers['parsing_fc'] parsing_out1_125 = net_125.layers['parsing_fc'] parsing_out1_150 = net_150.layers['parsing_fc'] parsing_out1_175 = net_175.layers['parsing_fc'] parsing_out2_050 = net_050.layers['parsing_rf_fc'] parsing_out2_075 = net_075.layers['parsing_rf_fc'] parsing_out2_100 = net_100.layers['parsing_rf_fc'] parsing_out2_125 = net_125.layers['parsing_rf_fc'] parsing_out2_150 = net_150.layers['parsing_rf_fc'] parsing_out2_175 = net_175.layers['parsing_rf_fc'] # edge net edge_out2_100 = net_100.layers['edge_rf_fc'] edge_out2_125 = net_125.layers['edge_rf_fc'] edge_out2_150 = net_150.layers['edge_rf_fc'] edge_out2_175 = net_175.layers['edge_rf_fc'] # combine resize parsing_out1 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out1_050, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out1_075, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out1_100, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out1_125, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out1_150, tf.shape(image_batch)[1:3,]), 
tf.image.resize_images(parsing_out1_175, tf.shape(image_batch)[1:3,])]), axis=0) parsing_out2 = tf.reduce_mean(tf.stack([tf.image.resize_images(parsing_out2_050, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out2_075, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out2_100, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out2_125, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out2_150, tf.shape(image_batch)[1:3,]), tf.image.resize_images(parsing_out2_175, tf.shape(image_batch)[1:3,])]), axis=0) edge_out2_100 = tf.image.resize_images(edge_out2_100, tf.shape(image_batch)[1:3,]) edge_out2_125 = tf.image.resize_images(edge_out2_125, tf.shape(image_batch)[1:3,]) edge_out2_150 = tf.image.resize_images(edge_out2_150, tf.shape(image_batch)[1:3,]) edge_out2_175 = tf.image.resize_images(edge_out2_175, tf.shape(image_batch)[1:3,]) edge_out2 = tf.reduce_mean(tf.stack([edge_out2_100, edge_out2_125, edge_out2_150, edge_out2_175]), axis=0) raw_output = tf.reduce_mean(tf.stack([parsing_out1, parsing_out2]), axis=0) head_output, tail_output = tf.unstack(raw_output, num=2, axis=0) tail_list = tf.unstack(tail_output, num=20, axis=2) tail_list_rev = [None] * 20 for xx in range(14): tail_list_rev[xx] = tail_list[xx] tail_list_rev[14] = tail_list[15] tail_list_rev[15] = tail_list[14] tail_list_rev[16] = tail_list[17] tail_list_rev[17] = tail_list[16] tail_list_rev[18] = tail_list[19] tail_list_rev[19] = tail_list[18] tail_output_rev = tf.stack(tail_list_rev, axis=2) tail_output_rev = tf.reverse(tail_output_rev, tf.stack([1])) raw_output_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0) raw_output_all = tf.expand_dims(raw_output_all, dim=0) pred_scores = tf.reduce_max(raw_output_all, axis=3) raw_output_all = tf.argmax(raw_output_all, axis=3) pred_all = tf.expand_dims(raw_output_all, dim=3) # Create 4-d tensor. 
raw_edge = tf.reduce_mean(tf.stack([edge_out2]), axis=0) head_output, tail_output = tf.unstack(raw_edge, num=2, axis=0) tail_output_rev = tf.reverse(tail_output, tf.stack([1])) raw_edge_all = tf.reduce_mean(tf.stack([head_output, tail_output_rev]), axis=0) raw_edge_all = tf.expand_dims(raw_edge_all, dim=0) pred_edge = tf.sigmoid(raw_edge_all) res_edge = tf.cast(tf.greater(pred_edge, 0.5), tf.int32) # prepare ground truth preds = tf.reshape(pred_all, [-1,]) gt = tf.reshape(label_batch, [-1,]) weights = tf.cast(tf.less_equal(gt, N_CLASSES - 1), tf.int32) # Ignoring all labels greater than or equal to n_classes. mIoU, update_op_iou = tf.contrib.metrics.streaming_mean_iou(preds, gt, num_classes=N_CLASSES, weights=weights) macc, update_op_acc = tf.contrib.metrics.streaming_accuracy(preds, gt, weights=weights) # # Which variables to load. # restore_var = tf.global_variables() # # Set up tf session and initialize variables. # config = tf.ConfigProto() # config.gpu_options.allow_growth = True # # gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7) # # config=tf.ConfigProto(gpu_options=gpu_options) # init = tf.global_variables_initializer() # evaluate prosessing parsing_dir = './output' # Set up tf session and initialize variables. config = tf.ConfigProto() config.gpu_options.allow_growth = True ``` 以上是初始化网络和初始化参数载入模型,下面定义两个函数分别处理val1.txt和val2.txt两个列表内部的数据。 ``` # 处理第一个列表函数 def humanParsing1(): # Which variables to load. restore_var = tf.global_variables() init = tf.global_variables_initializer() with tf.Session(config=config) as sess: sess.run(init) sess.run(tf.local_variables_initializer()) # Load weights. loader = tf.train.Saver(var_list=restore_var) if RESTORE_FROM is not None: if load(loader, sess, RESTORE_FROM): print(" [*] Load SUCCESS") else: print(" [!] Load failed...") # Create queue coordinator. coord = tf.train.Coordinator() # Start queue threads. threads = tf.train.start_queue_runners(coord=coord, sess=sess) # Iterate over training steps. 
for step in range(NUM_STEPS): # parsing_, scores, edge_ = sess.run([pred_all, pred_scores, pred_edge])# , update_op parsing_, scores, edge_ = sess.run([pred_all, pred_scores, pred_edge]) # , update_op print('step {:d}'.format(step)) print(image_list[step]) img_split = image_list[step].split('/') img_id = img_split[-1][:-4] msk = decode_labels(parsing_, num_classes=N_CLASSES) parsing_im = Image.fromarray(msk[0]) parsing_im.save('{}/{}_vis.png'.format(parsing_dir, img_id)) coord.request_stop() coord.join(threads) # 处理第二个列表函数 def humanParsing2(): # Set up tf session and initialize variables. config = tf.ConfigProto() config.gpu_options.allow_growth = True # gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.7) # config=tf.ConfigProto(gpu_options=gpu_options) # Which variables to load. restore_var = tf.global_variables() init = tf.global_variables_initializer() with tf.Session(config=config) as sess: # Create queue coordinator. coord = tf.train.Coordinator() sess.run(init) sess.run(tf.local_variables_initializer()) # Load weights. loader = tf.train.Saver(var_list=restore_var) if RESTORE_FROM is not None: if load(loader, sess, RESTORE_FROM): print(" [*] Load SUCCESS") else: print(" [!] Load failed...") LIST_PATH = './datasets/CIHP/list/val1.txt' DATA_ID_LIST = './datasets/CIHP/list/val_id1.txt' with open(DATA_ID_LIST, 'r') as f: NUM_STEPS = len(f.readlines()) # with tf.name_scope("create_inputs"): with tf.name_scope(scp1): tf.get_variable_scope().reuse_variables() reader = ImageReader(DATA_DIR, LIST_PATH, DATA_ID_LIST, None, False, False, False, coord) image, label, edge_gt = reader.image, reader.label, reader.edge image_rev = tf.reverse(image, tf.stack([1])) image_list = reader.image_list # Start queue threads. threads = tf.train.start_queue_runners(coord=coord, sess=sess) # Load weights. loader = tf.train.Saver(var_list=restore_var) if RESTORE_FROM is not None: if load(loader, sess, RESTORE_FROM): print(" [*] Load SUCCESS") else: print(" [!] 
Load failed...") # Iterate over training steps. for step in range(NUM_STEPS): # parsing_, scores, edge_ = sess.run([pred_all, pred_scores, pred_edge])# , update_op parsing_, scores, edge_ = sess.run([pred_all, pred_scores, pred_edge]) # , update_op print('step {:d}'.format(step)) print(image_list[step]) img_split = image_list[step].split('/') img_id = img_split[-1][:-4] msk = decode_labels(parsing_, num_classes=N_CLASSES) parsing_im = Image.fromarray(msk[0]) parsing_im.save('{}/{}_vis.png'.format(parsing_dir, img_id)) coord.request_stop() coord.join(threads) if __name__ == '__main__': humanParsing1() humanParsing2() ``` 最终输出结果一直是第一个列表里面的循环,代码上用了 self.queue = tf.train.slice_input_producer([self.images, self.labels, self.edges], shuffle=shuffle),队列的方式进行多线程推理。最终得到的结果一直是第一个列表的循环,求大神告诉问题怎么解决。
tensorflow训练网络报错Invalid argument
##1.问题 程序报错,提示:Invalid argument: You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [?,24] ##2.代码 ``` import time import numpy as np import pandas as pd import tensorflow as tf import matplotlib.pyplot as plt # import dataset input_Dir = 'E:/data/input_H.csv' output_Dir = 'E:/data/output_H.csv' x_data = pd.read_csv(input_Dir, header = None) y_data = pd.read_csv(output_Dir, header = None) x_data = x_data.values y_data = y_data.values x_data = x_data.astype('float32') y_data = y_data.astype('float32') print("DATASET READY") # from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.2, random_state=1) row, column = x_train.shape row = float(row) # define structure of neural network n_hidden_1 = 250 n_hidden_2 = 128 n_input = 250 n_classes = 24 #initialize parameters x = tf.placeholder(tf.float32, [None, n_input]) y = tf.placeholder(tf.float32, [None, n_classes]) keep_prob = tf.placeholder(tf.float32) stddev = 0.1 weights = { 'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=stddev)), 'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=stddev)), 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=stddev)) } biases = { 'b1': tf.Variable(tf.random_normal([n_hidden_1], stddev=stddev)), 'b2': tf.Variable(tf.random_normal([n_hidden_2], stddev=stddev)), 'out': tf.Variable(tf.random_normal([n_classes], stddev=stddev)) } print("NETWORK READY") # forward propagation def multilayer_perceptron(_X, _weights, _biases): layer_1 = tf.nn.leaky_relu(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1'])) layer_2 = tf.nn.leaky_relu(tf.add(tf.matmul(layer_1, _weights['w2']), _biases['b2'])) return (tf.add(tf.matmul(layer_2, _weights['out']), _biases['out'])) # pred = multilayer_perceptron(x, weights, biases) cost = tf.reduce_mean(tf.square(y - pred)) optm = tf.train.GradientDescentOptimizer(learning_rate=0.03).minimize(cost) init = 
tf.global_variables_initializer() print("FUNCTIONS READY") n_epochs = 100000 batch_size = 512 n_batches = np.int(np.ceil(row / batch_size)) def fetch_batch(epoch, batch_index, batch_size): # 随机获取小批量数据 np.random.seed(epoch * n_batches + batch_index) indices = np.random.randint(row, size = batch_size) return x_train[indices], y_train[indices] iter = 10000 sess = tf.Session() sess.run(tf.global_variables_initializer()) feeds_test = {x: x_test, y: y_test, keep_prob: 1} for epoch in range(n_epochs): # 总共循环次数 for batch_index in range(n_batches): x_batch, y_batch = fetch_batch(epoch, batch_index, batch_size) feeds_train = {x: x_batch, y: y_batch, keep_prob: 1} sess.run(optm, feed_dict=feeds_train) print("EPOCH %d HAS FINISHED" % (epoch)) print("COST %d :" % (epoch)) print(sess.run(cost),feed_dict=feeds_train) print("\n") sess.close() print("FINISHED") ``` ##3.报错信息 Traceback (most recent call last): File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1356, in _do_call return fn(*args) File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1341, in _run_fn options, feed_dict, fetch_list, target_list, run_metadata) File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\client\session.py", line 1429, in _call_tf_sessionrun run_metadata) tensorflow.python.framework.errors_impl.InvalidArgumentError: 2 root error(s) found. (0) Invalid argument: You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [?,24] [[{{node Placeholder_1}}]] [[Mean/_7]] (1) Invalid argument: You must feed a value for placeholder tensor 'Placeholder_1' with dtype float and shape [?,24] [[{{node Placeholder_1}}]] 0 successful operations. 0 derived errors ignored. 
During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Users\Administrator\AppData\Local\Programs\Python\Python36\lib\site-packages\IPython\core\interactiveshell.py", line 3296, in run_code exec(code_obj, self.user_global_ns, self.user_ns) File "<ipython-input-2-762bc58e4306>", line 1, in <module> runfile('C:/Users/Administrator/Desktop/main/demo3.py', wdir='C:/Users/Administrator/Desktop/main') File "E:\Program Files\PyCharm 2019.1.3\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile #求问问题出在什么地方?
用tensorflow做机器翻译时训练代码有问题
``` # -*- coding:UTF-8 -*- import tensorflow as tf src_path = 'D:/Python37/untitled1/train.tags.en-zh.en.deletehtml' trg_path = 'D:/Python37/untitled1/train.tags.en-zh.zh.deletehtml' SRC_TRAIN_DATA = 'D:/Python37/untitled1/train.tags.en-zh.en.deletehtml.segment' # 源语言输入文件 TRG_TRAIN_DATA = 'D:/Python37/untitled1/train.tags.en-zh.zh.deletehtml.segment' # 目标语言输入文件 CHECKPOINT_PATH = './model/seq2seq_ckpt' # checkpoint保存路径 HIDDEN_SIZE = 1024 # LSTM的隐藏层规模 NUM_LAYERS = 2 # 深层循环神经网络中LSTM结构的层数 SRC_VOCAB_SIZE = 10000 # 源语言词汇表大小 TRG_VOCAB_SIZE = 4000 # 目标语言词汇表大小 BATCH_SIZE = 100 # 训练数据batch的大小 NUM_EPOCH = 5 # 使用训练数据的轮数 KEEP_PROB = 0.8 # 节点不被dropout的概率 MAX_GRAD_NORM = 5 # 用于控制梯度膨胀的梯度大小上限 SHARE_EMB_AND_SOFTMAX = True # 在softmax层和词向量层之间共享参数 MAX_LEN = 50 # 限定句子的最大单词数量 SOS_ID = 1 # 目标语言词汇表中<sos>的ID """ function: 数据batching,产生最后输入数据格式 Parameters: file_path-数据路径 Returns: dataset- 每个句子-对应的长度组成的TextLineDataset类的数据集对应的张量 """ def MakeDataset(file_path): dataset = tf.data.TextLineDataset(file_path) # map(function, sequence[, sequence, ...]) -> list # 通过定义可以看到,这个函数的第一个参数是一个函数,剩下的参数是一个或多个序列,返回值是一个集合。 # function可以理解为是一个一对一或多对一函数,map的作用是以参数序列中的每一个元素调用function函数,返回包含每次function函数返回值的list。 # lambda argument_list: expression # 其中lambda是Python预留的关键字,argument_list和expression由用户自定义 # argument_list参数列表, expression 为函数表达式 # 根据空格将单词编号切分开并放入一个一维向量 dataset = dataset.map(lambda string: tf.string_split([string]).values) # 将字符串形式的单词编号转化为整数 dataset = dataset.map(lambda string: tf.string_to_number(string, tf.int32)) # 统计每个句子的单词数量,并与句子内容一起放入Dataset dataset = dataset.map(lambda x: (x, tf.size(x))) return dataset """ function: 从源语言文件src_path和目标语言文件trg_path中分别读取数据,并进行填充和batching操作 Parameters: src_path-源语言,即被翻译的语言,英语. trg_path-目标语言,翻译之后的语言,汉语. 
batch_size-batch的大小 Returns: dataset- 每个句子-对应的长度 组成的TextLineDataset类的数据集 """ def MakeSrcTrgDataset(src_path, trg_path, batch_size): # 首先分别读取源语言数据和目标语言数据 src_data = MakeDataset(src_path) trg_data = MakeDataset(trg_path) # 通过zip操作将两个Dataset合并为一个Dataset,现在每个Dataset中每一项数据ds由4个张量组成 # ds[0][0]是源句子 # ds[0][1]是源句子长度 # ds[1][0]是目标句子 # ds[1][1]是目标句子长度 #https://blog.csdn.net/qq_32458499/article/details/78856530这篇博客看一下可以细致了解一下Dataset这个库,以及.map和.zip的用法 dataset = tf.data.Dataset.zip((src_data, trg_data)) # 删除内容为空(只包含<eos>)的句子和长度过长的句子 def FilterLength(src_tuple, trg_tuple): ((src_input, src_len), (trg_label, trg_len)) = (src_tuple, trg_tuple) # tf.logical_and 相当于集合中的and做法,后面两个都为true最终结果才会为true,否则为false # tf.greater Returns the truth value of (x > y),所以以下所说的是句子长度必须得大于一也就是不能为空的句子 # tf.less_equal Returns the truth value of (x <= y),所以所说的是长度要小于最长长度 src_len_ok = tf.logical_and(tf.greater(src_len, 1), tf.less_equal(src_len, MAX_LEN)) trg_len_ok = tf.logical_and(tf.greater(trg_len, 1), tf.less_equal(trg_len, MAX_LEN)) return tf.logical_and(src_len_ok, trg_len_ok) #两个都满足才返回true # filter接收一个函数Func并将该函数作用于dataset的每个元素,根据返回值True或False保留或丢弃该元素,True保留该元素,False丢弃该元素 # 最后得到的就是去掉空句子和过长的句子的数据集 dataset = dataset.filter(FilterLength) # 解码器需要两种格式的目标句子: # 1.解码器的输入(trg_input), 形式如同'<sos> X Y Z' # 2.解码器的目标输出(trg_label), 形式如同'X Y Z <eos>' # 上面从文件中读到的目标句子是'X Y Z <eos>'的形式,我们需要从中生成'<sos> X Y Z'形式并加入到Dataset # 编码器只有输入,没有输出,而解码器有输入也有输出,输入为<sos>+(除去最后一位eos的label列表) # 例如train.en最后都为2,id为2就是eos def MakeTrgInput(src_tuple, trg_tuple): ((src_input, src_len), (trg_label, trg_len)) = (src_tuple, trg_tuple) # tf.concat用法 https://blog.csdn.net/qq_33431368/article/details/79429295 trg_input = tf.concat([[SOS_ID], trg_label[:-1]], axis=0) return ((src_input, src_len), (trg_input, trg_label, trg_len)) dataset = dataset.map(MakeTrgInput) # 随机打乱训练数据 dataset = dataset.shuffle(10000) # 规定填充后的输出的数据维度 padded_shapes = ( (tf.TensorShape([None]), # 源句子是长度未知的向量 tf.TensorShape([])), # 源句子长度是单个数字 (tf.TensorShape([None]), # 
目标句子(解码器输入)是长度未知的向量 tf.TensorShape([None]), # 目标句子(解码器目标输出)是长度未知的向量 tf.TensorShape([])) # 目标句子长度(输出)是单个数字 ) # 调用padded_batch方法进行padding 和 batching操作 batched_dataset = dataset.padded_batch(batch_size, padded_shapes) return batched_dataset """ function: seq2seq模型 Parameters: Returns: """ class NMTModel(object): """ function: 模型初始化 Parameters: Returns: """ def __init__(self): # 定义编码器和解码器所使用的LSTM结构 self.enc_cell = tf.nn.rnn_cell.MultiRNNCell( [tf.nn.rnn_cell.LSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)]) self.dec_cell = tf.nn.rnn_cell.MultiRNNCell( [tf.nn.rnn_cell.LSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)]) # 为源语言和目标语言分别定义词向量 self.src_embedding = tf.get_variable('src_emb', [SRC_VOCAB_SIZE, HIDDEN_SIZE]) self.trg_embedding = tf.get_variable('trg_emb', [TRG_VOCAB_SIZE, HIDDEN_SIZE]) # 定义softmax层的变量 if SHARE_EMB_AND_SOFTMAX: self.softmax_weight = tf.transpose(self.trg_embedding) else: self.softmax_weight = tf.get_variable('weight', [HIDDEN_SIZE, TRG_VOCAB_SIZE]) self.softmax_bias = tf.get_variable('softmax_loss', [TRG_VOCAB_SIZE]) """ function: 在forward函数中定义模型的前向计算图 Parameters:   MakeSrcTrgDataset函数产生的五种张量如下(全部为张量) src_input: 编码器输入(源数据) src_size : 输入大小 trg_input:解码器输入(目标数据) trg_label:解码器输出(目标数据) trg_size: 输出大小 Returns: """ def forward(self, src_input, src_size, trg_input, trg_label, trg_size): batch_size = tf.shape(src_input)[0] # 将输入和输出单词转为词向量(rnn中输入数据都要转换成词向量) # 相当于input中的每个id对应的embedding中的向量转换 src_emb = tf.nn.embedding_lookup(self.src_embedding, src_input) trg_emb = tf.nn.embedding_lookup(self.trg_embedding, trg_input) # 在词向量上进行dropout src_emb = tf.nn.dropout(src_emb, KEEP_PROB) trg_emb = tf.nn.dropout(trg_emb, KEEP_PROB) # 使用dynamic_rnn构造编码器 # 编码器读取源句子每个位置的词向量,输出最后一步的隐藏状态enc_state # 因为编码器是一个双层LSTM,因此enc_state是一个包含两个LSTMStateTuple类的tuple, # 每个LSTMStateTuple对应编码器中一层的状态 # enc_outputs是顶层LSTM在每一步的输出,它的维度是[batch_size, max_time, HIDDEN_SIZE] # seq2seq模型中不需要用到enc_outputs,而attention模型会用到它 with tf.variable_scope('encoder'): enc_outputs, enc_state = 
tf.nn.dynamic_rnn(self.enc_cell, src_emb, src_size, dtype=tf.float32) # 使用dynamic_rnn构造解码器 # 解码器读取目标句子每个位置的词向量,输出的dec_outputs为每一步顶层LSTM的输出 # dec_outputs的维度是[batch_size, max_time, HIDDEN_SIZE] # initial_state=enc_state表示用编码器的输出来初始化第一步的隐藏状态 # 编码器最后编码结束最后的状态为解码器初始化的状态 with tf.variable_scope('decoder'): dec_outputs, _ = tf.nn.dynamic_rnn(self.dec_cell, trg_emb, trg_size, initial_state=enc_state) # 计算解码器每一步的log perplexity # 输出重新转换成shape为[,HIDDEN_SIZE] output = tf.reshape(dec_outputs, [-1, HIDDEN_SIZE]) # 计算解码器每一步的softmax概率值 logits = tf.matmul(output, self.softmax_weight) + self.softmax_bias # 交叉熵损失函数,算loss loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.reshape(trg_label, [-1]), logits=logits) # 在计算平均损失时,需要将填充位置的权重设置为0,以避免无效位置的预测干扰模型的训练 label_weights = tf.sequence_mask(trg_size, maxlen=tf.shape(trg_label)[1], dtype=tf.float32) label_weights = tf.reshape(label_weights, [-1]) cost = tf.reduce_sum(loss * label_weights) cost_per_token = cost / tf.reduce_sum(label_weights) # 定义反向传播操作 trainable_variables = tf.trainable_variables() # 控制梯度大小,定义优化方法和训练步骤 # 算出每个需要更新的值的梯度,并对其进行控制 grads = tf.gradients(cost / tf.to_float(batch_size), trainable_variables) grads, _ = tf.clip_by_global_norm(grads, MAX_GRAD_NORM) # 利用梯度下降优化算法进行优化.学习率为1.0 optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0) # 相当于minimize的第二步,正常来讲所得到的list[grads,vars]由compute_gradients得到,返回的是执行对应变量的更新梯度操作的op train_op = optimizer.apply_gradients(zip(grads, trainable_variables)) return cost_per_token, train_op """ function: 使用给定的模型model上训练一个epoch,并返回全局步数,每训练200步便保存一个checkpoint Parameters: session : 会议 cost_op : 计算loss的操作op train_op: 训练的操作op saver:  保存model的类 step:   训练步数 Returns: """ def run_epoch(session, cost_op, train_op, saver, step): # 训练一个epoch # 重复训练步骤直至遍历完Dataset中所有数据 while True: try: # 运行train_op并计算cost_op的结果也就是损失值,训练数据在main()函数中以Dataset方式提供 cost, _ = session.run([cost_op, train_op]) # 步数为10的倍数进行打印 if step % 10 == 0: print('After %d steps, per token cost is %.3f' % (step, cost)) # 
每200步保存一个checkpoint if step % 200 == 0: saver.save(session, CHECKPOINT_PATH, global_step=step) step += 1 except tf.errors.OutOfRangeError: break return step """ function: 主函数 Parameters: Returns: """ def main(): # 定义初始化函数 initializer = tf.random_uniform_initializer(-0.05, 0.05) # 定义训练用的循环神经网络模型 with tf.variable_scope('nmt_model', reuse=None, initializer=initializer): train_model = NMTModel() # 定义输入数据 data = MakeSrcTrgDataset(SRC_TRAIN_DATA, TRG_TRAIN_DATA, BATCH_SIZE) iterator = data.make_initializable_iterator() (src, src_size), (trg_input, trg_label, trg_size) = iterator.get_next() # 定义前向计算图,输入数据以张量形式提供给forward函数 cost_op, train_op = train_model.forward(src, src_size, trg_input, trg_label, trg_size) # 训练模型 # 保存模型 saver = tf.train.Saver() step = 0 with tf.Session() as sess: # 初始化全部变量 tf.global_variables_initializer().run() # 进行NUM_EPOCH轮数 for i in range(NUM_EPOCH): print('In iteration: %d' % (i + 1)) sess.run(iterator.initializer) step = run_epoch(sess, cost_op, train_op, saver, step) if __name__ == '__main__': main() ``` 问题如下,不知道怎么解决,谢谢! 
Traceback (most recent call last): File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1334, in _do_call return fn(*args) File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1319, in _run_fn options, feed_dict, fetch_list, target_list, run_metadata) File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1407, in _call_tf_sessionrun run_metadata) tensorflow.python.framework.errors_impl.InvalidArgumentError: StringToNumberOp could not correctly convert string: This [[{{node StringToNumber}}]] [[{{node IteratorGetNext}}]] During handling of the above exception, another exception occurred: Traceback (most recent call last): File "D:/Python37/untitled1/train_model.py", line 277, in <module> main() File "D:/Python37/untitled1/train_model.py", line 273, in main step = run_epoch(sess, cost_op, train_op, saver, step) File "D:/Python37/untitled1/train_model.py", line 231, in run_epoch cost, _ = session.run([cost_op, train_op]) File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 929, in run run_metadata_ptr) File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1152, in _run feed_dict_tensor, options, run_metadata) File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1328, in _do_run run_metadata) File "D:\Anaconda\envs\tensorflow\lib\site-packages\tensorflow\python\client\session.py", line 1348, in _do_call raise type(e)(node_def, op, message) tensorflow.python.framework.errors_impl.InvalidArgumentError: StringToNumberOp could not correctly convert string: This [[{{node StringToNumber}}]] [[node IteratorGetNext (defined at D:/Python37/untitled1/train_model.py:259) ]]
tensorflow批量读取图片出错
# -*- coding: utf-8 -*- import tensorflow as tf import numpy as np import os import matplotlib.pyplot as plt #训练样本在本地磁盘中的地址 file_dir='/home/lvlulu/Test-Train/Microfibers' # 这里是输入数据的地址 batch_size = 10 def get_files(file_dir): lung_img = []; label_lung_img = []; for file in os.listdir(file_dir): lung_img.append( file_dir + file) label_lung_img.append(1) image_list = np.hstack((lung_img)) label_list = np.hstack((label_lung_img)) temp = np.array([lung_img, label_lung_img]).T #利用shuffle打乱数据 np.random.shuffle(temp) image_list = list(temp[:,0]) label_list = list(temp[:,1]) label_list = [int(i) for i in label_list] return image_list, label_list def get_batch(image,label): image_W, image_H = 221, 181 #batch_size = 10 #将python.list类型转换成tf能够识别的格式 image=tf.cast(image,tf.string) label=tf.cast(label,tf.int32) #产生一个输入队列queue epoch_num = 50 #防止无限循环 input_queue=tf.train.slice_input_producer([image,label], num_epochs=epoch_num) label=input_queue[1] image_contents=tf.read_file(input_queue[0]) #print(image_contents) #将图像解码,不同类型的图像不能混在一起,要么只用jpeg,要么只用png等。 image=tf.image.decode_jpeg(image_contents, channels = 3) #print(image) #将数据预处理,对图像进行旋转、缩放、裁剪、归一化等操作,让计算出的模型更健壮。 image=tf.image.resize_image_with_crop_or_pad(image,image_W,image_H) image=tf.image.per_image_standardization(image) #print(image) #生成batch min_after_dequeue=10 capacity=min_after_dequeue+5*batch_size image_batch,label_batch=tf.train.shuffle_batch( [image,label], batch_size=batch_size, num_threads=64, capacity=capacity, min_after_dequeue=min_after_dequeue ) #重新排列标签,行数为[batch_size] #label_batch=tf.reshape(label_batch,[batch_size]) image_batch = tf.reshape(image_batch,[batch_size,image_W,image_H,3]) image_batch=tf.cast(image_batch,np.float32) #print(image_batch) return image_batch, label_batch if __name__ == "__main__": image_list, label_list = get_files(file_dir) image_batch, label_batch = get_batch(image_list, label_list) print(image_batch) with tf.Session() as sess: ##初始化工作 sess.run(tf.global_variables_initializer()) 
sess.run(tf.local_variables_initializer()) i = 0 coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(coord=coord) #print(sess.run([image_batch])) print(label_batch) #回收子线程 coord.request_stop() coord.join(threads) ``` ``` Caused by op u'ReadFile', defined at: File "batch.py", line 80, in <module> image_batch, label_batch = get_batch(image_list, label_list) File "batch.py", line 48, in get_batch image_contents=tf.read_file(input_queue[0]) File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_io_ops.py", line 144, in read_file result = _op_def_lib.apply_op("ReadFile", filename=filename, name=name) File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 759, in apply_op op_def=op_def) File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2240, in create_op original_op=self._default_original_op, op_def=op_def) File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1128, in __init__ self._traceback = _extract_stack() NotFoundError (see above for traceback): /home/lvlulu/Test-Train/Microfibers0112.jpg [[Node: ReadFile = ReadFile[_device="/job:localhost/replica:0/task:0/cpu:0"](input_producer/Gather)]] [[Node: Shape_6/_14 = _HostSend[T=DT_INT32, client_terminated=false, recv_device="/job:localhost/replica:0/task:0/gpu:0", send_device="/job:localhost/replica:0/task:0/gpu:0", send_device_incarnation=1, tensor_name="edge_3_Shape_6", _device="/job:localhost/replica:0/task:0/gpu:0"](Shape_6)]] ``` ```
在网上找到一个DQN的神经网络代码。可以运行,但是没有读取模型的部分
代码可以运行,但是没用读取模型的代码,我在网上找了一段时间,还是没有找到教程。自己写的读写代码不能正常工作 这是原代码 ``` import pygame import random from pygame.locals import * import numpy as np from collections import deque import tensorflow as tf import cv2 BLACK = (0 ,0 ,0 ) WHITE = (255,255,255) SCREEN_SIZE = [320,400] BAR_SIZE = [50, 5] BALL_SIZE = [15, 15] # 神经网络的输出 MOVE_STAY = [1, 0, 0,0] MOVE_LEFT = [0, 1, 0,0] MOVE_RIGHT = [0, 0, 1,0] MOVE_RIGHT1=[0,0,0,1] class Game(object): def __init__(self): pygame.init() self.clock = pygame.time.Clock() self.screen = pygame.display.set_mode(SCREEN_SIZE) pygame.display.set_caption('Simple Game') self.ball_pos_x = SCREEN_SIZE[0]//2 - BALL_SIZE[0]/2 self.ball_pos_y = SCREEN_SIZE[1]//2 - BALL_SIZE[1]/2 self.ball_dir_x = -1 # -1 = left 1 = right self.ball_dir_y = -1 # -1 = up 1 = down self.ball_pos = pygame.Rect(self.ball_pos_x, self.ball_pos_y, BALL_SIZE[0], BALL_SIZE[1]) self.bar_pos_x = SCREEN_SIZE[0]//2-BAR_SIZE[0]//2 self.bar_pos = pygame.Rect(self.bar_pos_x, SCREEN_SIZE[1]-BAR_SIZE[1], BAR_SIZE[0], BAR_SIZE[1]) # action是MOVE_STAY、MOVE_LEFT、MOVE_RIGHT # ai控制棒子左右移动;返回游戏界面像素数和对应的奖励。(像素->奖励->强化棒子往奖励高的方向移动) def step(self, action): if action == MOVE_LEFT: self.bar_pos_x = self.bar_pos_x - 2 elif action == MOVE_RIGHT: self.bar_pos_x = self.bar_pos_x + 2 elif action == MOVE_RIGHT1: self.bar_pos_x = self.bar_pos_x + 1 else: pass if self.bar_pos_x < 0: self.bar_pos_x = 0 if self.bar_pos_x > SCREEN_SIZE[0] - BAR_SIZE[0]: self.bar_pos_x = SCREEN_SIZE[0] - BAR_SIZE[0] self.screen.fill(BLACK) self.bar_pos.left = self.bar_pos_x pygame.draw.rect(self.screen, WHITE, self.bar_pos) self.ball_pos.left += self.ball_dir_x * 2 self.ball_pos.bottom += self.ball_dir_y * 3 pygame.draw.rect(self.screen, WHITE, self.ball_pos) if self.ball_pos.top <= 0 or self.ball_pos.bottom >= (SCREEN_SIZE[1] - BAR_SIZE[1]+1): self.ball_dir_y = self.ball_dir_y * -1 if self.ball_pos.left <= 0 or self.ball_pos.right >= (SCREEN_SIZE[0]): self.ball_dir_x = self.ball_dir_x * -1 reward = 0 if self.bar_pos.top <= 
self.ball_pos.bottom and (self.bar_pos.left < self.ball_pos.right and self.bar_pos.right > self.ball_pos.left): reward = 1 # 击中奖励 elif self.bar_pos.top <= self.ball_pos.bottom and (self.bar_pos.left > self.ball_pos.right or self.bar_pos.right < self.ball_pos.left): reward = -1 # 没击中惩罚 # 获得游戏界面像素 screen_image = pygame.surfarray.array3d(pygame.display.get_surface()) #np.save(r'C:\Users\Administrator\Desktop\game\model\112454.npy',screen_image) pygame.display.update() # 返回游戏界面像素和对应的奖励 return reward, screen_image # learning_rate LEARNING_RATE = 0.99 # 更新梯度 INITIAL_EPSILON = 1.0 FINAL_EPSILON = 0.05 # 测试观测次数 EXPLORE = 500000 OBSERVE = 50000 # 存储过往经验大小 REPLAY_MEMORY = 500000 BATCH = 100 output = 4 # 输出层神经元数。代表3种操作-MOVE_STAY:[1, 0, 0] MOVE_LEFT:[0, 1, 0] MOVE_RIGHT:[0, 0, 1] input_image = tf.placeholder("float", [None, 80, 100, 4]) # 游戏像素 action = tf.placeholder("float", [None, output]) # 操作 # 定义CNN-卷积神经网络 参考:http://blog.topspeedsnail.com/archives/10451 def convolutional_neural_network(input_image): weights = {'w_conv1':tf.Variable(tf.zeros([8, 8, 4, 32])), 'w_conv2':tf.Variable(tf.zeros([4, 4, 32, 64])), 'w_conv3':tf.Variable(tf.zeros([3, 3, 64, 64])), 'w_fc4':tf.Variable(tf.zeros([3456, 784])), 'w_out':tf.Variable(tf.zeros([784, output]))} biases = {'b_conv1':tf.Variable(tf.zeros([32])), 'b_conv2':tf.Variable(tf.zeros([64])), 'b_conv3':tf.Variable(tf.zeros([64])), 'b_fc4':tf.Variable(tf.zeros([784])), 'b_out':tf.Variable(tf.zeros([output]))} conv1 = tf.nn.relu(tf.nn.conv2d(input_image, weights['w_conv1'], strides = [1, 4, 4, 1], padding = "VALID") + biases['b_conv1']) conv2 = tf.nn.relu(tf.nn.conv2d(conv1, weights['w_conv2'], strides = [1, 2, 2, 1], padding = "VALID") + biases['b_conv2']) conv3 = tf.nn.relu(tf.nn.conv2d(conv2, weights['w_conv3'], strides = [1, 1, 1, 1], padding = "VALID") + biases['b_conv3']) conv3_flat = tf.reshape(conv3, [-1, 3456]) fc4 = tf.nn.relu(tf.matmul(conv3_flat, weights['w_fc4']) + biases['b_fc4']) output_layer = tf.matmul(fc4, 
weights['w_out']) + biases['b_out'] return output_layer # 深度强化学习入门: https://www.nervanasys.com/demystifying-deep-reinforcement-learning/ # 训练神经网络 def train_neural_network(input_image): predict_action = convolutional_neural_network(input_image) argmax = tf.placeholder("float", [None, output]) gt = tf.placeholder("float", [None]) action = tf.reduce_sum(tf.multiply(predict_action, argmax), reduction_indices = 1) cost = tf.reduce_mean(tf.square(action - gt)) optimizer = tf.train.AdamOptimizer(1e-6).minimize(cost) game = Game() D = deque() _, image = game.step(MOVE_STAY) # 转换为灰度值 image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY) # 转换为二值 ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY) input_image_data = np.stack((image, image, image, image), axis = 2) with tf.Session() as sess: sess.run(tf.initialize_all_variables()) saver = tf.train.Saver() n = 0 epsilon = INITIAL_EPSILON while True: action_t = predict_action.eval(feed_dict = {input_image : [input_image_data]})[0] argmax_t = np.zeros([output], dtype=np.int) if(random.random() <= INITIAL_EPSILON): maxIndex = random.randrange(output) else: maxIndex = np.argmax(action_t) argmax_t[maxIndex] = 1 if epsilon > FINAL_EPSILON: epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE for event in pygame.event.get(): #macOS需要事件循环,否则白屏 if event.type == QUIT: pygame.quit() sys.exit() reward, image = game.step(list(argmax_t)) image = cv2.cvtColor(cv2.resize(image, (100, 80)), cv2.COLOR_BGR2GRAY) ret, image = cv2.threshold(image, 1, 255, cv2.THRESH_BINARY) image = np.reshape(image, (80, 100, 1)) input_image_data1 = np.append(image, input_image_data[:, :, 0:3], axis = 2) D.append((input_image_data, argmax_t, reward, input_image_data1)) if len(D) > REPLAY_MEMORY: D.popleft() if n > OBSERVE: minibatch = random.sample(D, BATCH) input_image_data_batch = [d[0] for d in minibatch] argmax_batch = [d[1] for d in minibatch] reward_batch = [d[2] for d in minibatch] input_image_data1_batch = [d[3] for d in 
minibatch] gt_batch = [] out_batch = predict_action.eval(feed_dict = {input_image : input_image_data1_batch}) for i in range(0, len(minibatch)): gt_batch.append(reward_batch[i] + LEARNING_RATE * np.max(out_batch[i])) optimizer.run(feed_dict = {gt : gt_batch, argmax : argmax_batch, input_image : input_image_data_batch}) input_image_data = input_image_data1 n = n+1 if n % 100 == 0: saver.save(sess, 'D:/lolAI/model/game', global_step = n) # 保存模型 print(n, "epsilon:", epsilon, " " ,"action:", maxIndex, " " ,"reward:", reward) train_neural_network(input_image) ``` 这是我根据教程写的读取模型并且运行的代码 ``` import tensorflow as tf tf.reset_default_graph() with tf.Session() as sess: new_saver = tf.train.import_meta_graph('D:/lolAI/model/game-400.meta') new_saver.restore(sess, tf.train.latest_checkpoint('D:/lolAI/model')) print(sess.run(tf.initialize_all_variables())) ``` 代码我还没有看的很明白,希望大佬给点意见
深度学习图片识别训练循环为何中途停止?
最近在跑深度学习的inceptionV3的时候偶尔会遇到一问题,就是代码在运行到某个时间点时,就停止迭代运算,不知道为什么? ![图片说明](https://img-ask.csdn.net/upload/201811/11/1541904283_461011.png) 上面图是个例子,假设运行到291的step的时候停止了,不在继续运算,但是CPU和GPU是满载的。 下面是代码: ``` # coding=utf-8 import tensorflow as tf import numpy as np import pdb import os from datetime import datetime import slim.inception_model as inception_v3 from create_tf_record import * import tensorflow.contrib.slim as slim labels_nums = 7 # 类别个数 batch_size = 64 # resize_height = 299 # 指定SSS存储图片高度 resize_width = 299 # 指定存储图片宽度 depths = 3 data_shape = [batch_size, resize_height, resize_width, depths] # 定义input_images为图片数据 input_images = tf.placeholder(dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='input') # 定义input_labels为labels数据 # input_labels = tf.placeholder(dtype=tf.int32, shape=[None], name='label') input_labels = tf.placeholder(dtype=tf.int32, shape=[None, labels_nums], name='label') # 定义dropout的概率 keep_prob = tf.placeholder(tf.float32, name='keep_prob') is_training = tf.placeholder(tf.bool, name='is_training') #config = tf.ConfigProto() #config = tf.ConfigProto() #config.gpu_options.allow_growth = True #tf.Session(config = config) #tf.Session(config=tf.ConfigProto(allow_growth=True)) def net_evaluation(sess, loss, accuracy, val_images_batch, val_labels_batch, val_nums): val_max_steps = int(val_nums / batch_size) val_losses = [] val_accs = [] for _ in range(val_max_steps): val_x, val_y = sess.run([val_images_batch, val_labels_batch]) # print('labels:',val_y) # val_loss = sess.run(loss, feed_dict={x: val_x, y: val_y, keep_prob: 1.0}) # val_acc = sess.run(accuracy,feed_dict={x: val_x, y: val_y, keep_prob: 1.0}) val_loss, val_acc = sess.run([loss, accuracy], feed_dict={input_images: val_x, input_labels: val_y, keep_prob: 1.0, is_training: False}) val_losses.append(val_loss) val_accs.append(val_acc) mean_loss = np.array(val_losses, dtype=np.float32).mean() mean_acc = np.array(val_accs, dtype=np.float32).mean() return mean_loss, mean_acc def 
step_train(train_op, loss, accuracy, train_images_batch, train_labels_batch, train_nums, train_log_step, val_images_batch, val_labels_batch, val_nums, val_log_step, snapshot_prefix, snapshot): ''' 循环迭代训练过程 :param train_op: 训练op :param loss: loss函数 :param accuracy: 准确率函数 :param train_images_batch: 训练images数据 :param train_labels_batch: 训练labels数据 :param train_nums: 总训练数据 :param train_log_step: 训练log显示间隔 :param val_images_batch: 验证images数据 :param val_labels_batch: 验证labels数据 :param val_nums: 总验证数据 :param val_log_step: 验证log显示间隔 :param snapshot_prefix: 模型保存的路径 :param snapshot: 模型保存间隔 :return: None ''' # 初始化 #init = tf.global_variables_initializer() saver = tf.train.Saver() max_acc = 0.0 #ckpt = tf.train.get_checkpoint_state('D:/can_test/inception v3/') #saver = tf.train.import_meta_graph(ckpt.model_checkpoint_path + '.meta') #tf.reset_default_graph() with tf.Session() as sess: #sess.run(tf.global_variables_initializer())#恢复训练用 #saver = tf.train.import_meta_graph('D://can_test/inception v3/best_models_2_0.7500.ckpt.meta')#恢复训练 #saver.restore(sess, tf.train.latest_checkpoint('D://can_test/inception v3/'))#恢复训练 sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) for i in range(max_steps + 1): batch_input_images, batch_input_labels = sess.run([train_images_batch, train_labels_batch]) _, train_loss = sess.run([train_op, loss], feed_dict={input_images: batch_input_images, input_labels: batch_input_labels, keep_prob: 0.5, is_training: True}) # train测试(这里仅测试训练集的一个batch) if i % train_log_step == 0: train_acc = sess.run(accuracy, feed_dict={input_images: batch_input_images, input_labels: batch_input_labels, keep_prob: 1.0, is_training: False}) print( "%s: Step [%d] train Loss : %f, training accuracy : %g" % ( datetime.now(), i, train_loss, train_acc) ) # val测试(测试全部val数据) if i % val_log_step == 0: mean_loss, mean_acc = net_evaluation(sess, loss, 
accuracy, val_images_batch, val_labels_batch, val_nums) print( "%s: Step [%d] val Loss : %f, val accuracy : %g" % (datetime.now(), i, mean_loss, mean_acc) ) # 模型保存:每迭代snapshot次或者最后一次保存模型 if i == max_steps: print('-----save:{}-{}'.format(snapshot_prefix, i)) saver.save(sess, snapshot_prefix, global_step=i) # 保存val准确率最高的模型 if mean_acc > max_acc and mean_acc > 0.90: max_acc = mean_acc path = os.path.dirname(snapshot_prefix) best_models = os.path.join(path, 'best_models_{}_{:.4f}.ckpt'.format(i, max_acc)) print('------save:{}'.format(best_models)) saver.save(sess, best_models) coord.request_stop() coord.join(threads) def train(train_record_file, train_log_step, train_param, val_record_file, val_log_step, labels_nums, data_shape, snapshot, snapshot_prefix): ''' :param train_record_file: 训练的tfrecord文件 :param train_log_step: 显示训练过程log信息间隔 :param train_param: train参数 :param val_record_file: 验证的tfrecord文件 :param val_log_step: 显示验证过程log信息间隔 :param val_param: val参数 :param labels_nums: labels数 :param data_shape: 输入数据shape :param snapshot: 保存模型间隔 :param snapshot_prefix: 保存模型文件的前缀名 :return: ''' [base_lr, max_steps] = train_param [batch_size, resize_height, resize_width, depths] = data_shape # 获得训练和测试的样本数 train_nums = get_example_nums(train_record_file) val_nums = get_example_nums(val_record_file) print('train nums:%d,val nums:%d' % (train_nums, val_nums)) # 从record中读取图片和labels数据 # train数据,训练数据一般要求打乱顺序shuffle=True train_images, train_labels = read_records(train_record_file, resize_height, resize_width, type='normalization') train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels, batch_size=batch_size, labels_nums=labels_nums, one_hot=True, shuffle=True) # val数据,验证数据可以不需要打乱数据 val_images, val_labels = read_records(val_record_file, resize_height, resize_width, type='normalization') val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels, batch_size=batch_size, labels_nums=labels_nums, one_hot=True, shuffle=False) # Define the model: 
with slim.arg_scope(inception_v3.inception_v3_arg_scope()): out, end_points = inception_v3.inception_v3(inputs=input_images, num_classes=labels_nums, dropout_keep_prob=keep_prob, is_training=is_training) # Specify the loss function: tf.losses定义的loss函数都会自动添加到loss函数,不需要add_loss()了 tf.losses.softmax_cross_entropy(onehot_labels=input_labels, logits=out) # 添加交叉熵损失loss=1.6 # slim.losses.add_loss(my_loss) loss = tf.losses.get_total_loss(add_regularization_losses=True) # 添加正则化损失loss=2.2 accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(out, 1), tf.argmax(input_labels, 1)), tf.float32)) # Specify the optimization scheme: optimizer = tf.train.GradientDescentOptimizer(learning_rate=base_lr) # global_step = tf.Variable(0, trainable=False) # learning_rate = tf.train.exponential_decay(0.05, global_step, 150, 0.9) # # optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) # # train_tensor = optimizer.minimize(loss, global_step) # train_op = slim.learning.create_train_op(loss, optimizer,global_step=global_step) # 在定义训练的时候, 注意到我们使用了`batch_norm`层时,需要更新每一层的`average`和`variance`参数, # 更新的过程不包含在正常的训练过程中, 需要我们去手动像下面这样更新 # 通过`tf.get_collection`获得所有需要更新的`op` update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # 使用`tensorflow`的控制流, 先执行更新算子, 再执行训练 with tf.control_dependencies(update_ops): # create_train_op that ensures that when we evaluate it to get the loss, # the update_ops are done and the gradient updates are computed. 
# train_op = slim.learning.create_train_op(total_loss=loss,optimizer=optimizer) train_op = slim.learning.create_train_op(total_loss=loss, optimizer=optimizer) # 循环迭代过程 step_train(train_op, loss, accuracy, train_images_batch, train_labels_batch, train_nums, train_log_step, val_images_batch, val_labels_batch, val_nums, val_log_step, snapshot_prefix, snapshot) if __name__ == '__main__': train_record_file = '/home/lab/new_jeremie/train.tfrecords' val_record_file = '/home/lab/new_jeremie/val.tfrecords' #train_record_file = 'D://cancer_v2/data/cancer/train.tfrecords' #val_record_file = 'D://val.tfrecords' train_log_step = 1 base_lr = 0.01 # 学习率 max_steps = 100000 # 迭代次数 train_param = [base_lr, max_steps] val_log_step = 1 snapshot = 2000 # 保存文件间隔 snapshot_prefix = './v3model.ckpt' train(train_record_file=train_record_file, train_log_step=train_log_step, train_param=train_param, val_record_file=val_record_file, val_log_step=val_log_step, #val_log_step=val_log_step, labels_nums=labels_nums, data_shape=data_shape, snapshot=snapshot, snapshot_prefix=snapshot_prefix) ```
求大神跟我解释一下这个程序
``` import paho.mqtt.client as mqtt from concurrent import futures clients=[] nclients=20 mqtt.Client.connected_flag=False #start False #create clients ######################################## for i in range(nclients): cname="Client"+str(i) client= mqtt.Client(cname) clients.append(client) ######################################## ex = futures.ThreadPoolExecutor(max_workers=10)#这个语句的意思是10个进程并发吗? futures object 这个对象是干嘛的 while True: #main loop for i in range(len(clients)): client.loop(0.01) #if not connected then connect.Max connection attempts #at once is set by max_workers#既然并发,为什么还要在这里加loop循环 if not client.connected_flag:#flag set true in on_connect f = ex.submit(Connect,parameter1,parameter2,etc#这个submit是什么意思,后面的参数parameter应该填什么 ``` ``` #!/usr/bin/env python import sys import paho.mqtt.client as mqtt import serial import time from functools import reduce import threading port = "/dev/ttyACM1" broker_adress = "10.0.2.190" sys.path.append("/home/hu/Schreibtisch/Arduino_BA_2.0/Probe_Programmierung/Python-Arduino-Proto-API-v2/arduino") ser = serial.Serial(port, 9600,timeout= 1) gassensor_value = "default_value" voltagesensor_value = "default value" currentsensor_value = "default value" temperaturesensor_value_1 = "default_value" temperaturesensor_value_2 = "default_value" humidity_value = "default_value" air_pressure_value = "default_value" altitude_value = "default_value" sensor_value = [['/CBCU/CB123/inner_space_of_CB/gassensor',gassensor_value], ['/CBCU/CB123/battery/voltagesensor', voltagesensor_value], ['/CBCU/CB123/battery/currentsensor', currentsensor_value], ['/CBCU/CB123/wassertank/temperature', temperaturesensor_value_1], ['/CBCU/CB123/battery/humidity', humidity_value], ['/CBCU/CB123/inner_space/temperature_of_inner_space', temperaturesensor_value_2], ['/CBCU/CB123/inner_space/air_pressure', air_pressure_value], ['/CBCU/CB123/inner_space/altitude', altitude_value]] def char2int(s): return {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9}[s] def 
mulit_int(x,y): return 10*x+y def str2int(s): if s.find('.') == -1: # 不是浮点数 return reduce(mulit_int, map(char2int, s)) else: # 是浮点数 s1 = s.split('.') s2int = reduce(mulit_int, map(char2int, s1[0])) # 整数部分 s2float = reduce(mulit_int, map(char2int, s1[1])) * 0.1 ** len(s1[1]) # 小数部分 return s2int + s2float ######################################################################### # Callback_1 for relay #on_connect1,on_disconnect1,on_subscribe1on_message_1 ######################################################################### def on_connect1(mqttrelay, obj, flags, rc): if rc != 0: exit(rc) else: mqttrelay.subscribe("qos0/test", 0) def on_disconnect1(mqttrelay, obj, rc): obj = rc def on_subscribe1(mqttrelay, obj, mid, granted_qos): print(mqttrelay.subscribe("qos0/test", 0)) print("Waiting for the subscribed messages") def on_message1(mqttrelay,userdata, message): a = str(message.payload.decode("utf-8")) if (a == "131"): ser.write(b'131') elif(a == "130"): ser.write(b'130') # time.sleep(1) # else: # ser.write(b'0') # time.sleep(1) # else: print("please publish the message 1 or 0") ######################################################################### #Callback_2 for gassensor #on_connect2,on_publish2 ######################################################################### def on_publish2(mqttsensor, obj, mid): print("mid: " + str(mid)) def on_connect2(mqttsensor, userdata, flags, rc): print("Connected with result code " + str(rc)) def on_publish3(mqttgassensor, obj, mid): print("mid: " + str(mid)) def on_connect3(mqttgassensor, userdata, flags, rc): print("Connected with result code " + str(rc)) #create new instance to subscribe the sitution of relay mqttrelay = mqtt.Client("relay_K_12", 1) #create new instance to publish the situation of sensor mqttsensor = mqtt.Client("sensor",1) #create new instance to publish the situation of gassensor mqttgassensor = mqtt.Client("gassensor",1) #the events and callbacks of instance mqttrelais associate with each other: 
mqttrelay.on_message = on_message1 mqttrelay.on_connect = on_connect1 mqttrelay.on_subscribe = on_subscribe1 mqttrelay.on_disconnect = on_disconnect1 mqttrelay.connect(broker_adress) #the events and callbacks of instance sensor associate with each other: mqttsensor.on_connect = on_connect2 mqttsensor.on_publish = on_publish2 mqttsensor.connect(broker_adress) #the events and callbacks of instance gassensor associate with each other: mqttgassensor.on_connect = on_connect3 mqttgassensor.on_publish = on_publish3 mqttgassensor.connect(broker_adress) def read_Sensor1(): i = 0 while True: temp = ser.readline().decode('ascii') #print(type(temp)) #print(temp) if temp.split(':')[0] == "Voltage": sensor_value[1][1] = temp i = i + 1 elif temp.split(':')[0] == "amperage": sensor_value[2][1] = temp i = i + 1 elif temp.split(':')[0] == "temperature1": sensor_value[3][1] =temp i = i + 1 elif temp.split(':')[0] == "RH": sensor_value[4][1] =temp i = i + 1 elif temp.split(':')[0] == "temperature2": sensor_value[5][1] =temp i = i + 1 elif temp.split(':')[0] =="airpressure": sensor_value[6][1] =temp i = i + 1 elif temp.split(':')[0] == "altitude": sensor_value[7][1] =temp i = i + 1 if (i == 7): break print(sensor_value[1]) mqttsensor.publish("/CBCU/CB123/battery/voltagesensor", sensor_value[1][1], retain=1) print(sensor_value[2]) mqttsensor.publish("/CBCU/CB123/battery/currentsensor", sensor_value[2][1], retain=1) print(sensor_value[3]) mqttsensor.publish("/CBCU/CB123/wassertank/temperature", sensor_value[3][1], retain=1) print(sensor_value[4]) mqttsensor.publish("/CBCU/CB123/battery/humidity", sensor_value[4][1], retain=1) print(sensor_value[5]) mqttsensor.publish("/CBCU/CB123/inner_space/temperature_of_inner_space", sensor_value[5][1], retain=1) print(sensor_value[6]) mqttsensor.publish("/CBCU/CB123/inner_space/air_pressure", sensor_value[6][1], retain=1) print(sensor_value[7]) mqttsensor.publish("/CBCU/CB123/inner_space/altitude", sensor_value[7][1], retain=1) def read_Sensor2(): j 
= 0 while True: temp = ser.readline().decode('ascii') # print(type(temp)) # print(temp) if temp.split(':')[0] == "Gassesnsor": sensor_value[0][1] = temp j = j + 1 if (j == 1): break print(sensor_value[0]) mqttsensor.publish("/CBCU/CB123/inner_space_of_CB/gassensor", sensor_value[0][1],retain = 1) def loop1(): while True: time.sleep(10) #mqttrelay.loop() mqttsensor.loop() read_Sensor1() time.sleep(10) def loop2(): while True: read_Sensor2() mqttgassensor.loop() def main(): added_thread1 = threading.Thread(target=loop1,name = "thread_1") added_thread2 = threading.Thread(target=loop2,name = "thread_2") added_thread2.start() added_thread1.start() main() ``` 上面是我的程序,求大佬給個點子,自己用多線程試了很多次,就是出錯 Exception in thread thread_1: Traceback (most recent call last): File "/home/hu/PycharmProjects/mqtt_probe/venv/lib/python3.5/site-packages/serial/serialposix.py", line 501, in read 'device reports readiness to read but returned no data ' serial.serialutil.SerialException: device reports readiness to read but returned no data (device disconnected or multiple access on port?) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner self.run() File "/usr/lib/python3.5/threading.py", line 862, in run self._target(*self._args, **self._kwargs) File "/home/hu/mqtt/8_publish_subscribe_for_sensoren_threads.py", line 208, in loop1 read_Sensor1() File "/home/hu/mqtt/8_publish_subscribe_for_sensoren_threads.py", line 126, in read_Sensor1 temp = ser.readline().decode('ascii') File "/home/hu/PycharmProjects/mqtt_probe/venv/lib/python3.5/site-packages/serial/serialposix.py", line 509, in read raise SerialException('read failed: {}'.format(e)) serial.serialutil.SerialException: read failed: device reports readiness to read but returned no data (device disconnected or multiple access on port?) 求大佬們指正
Java学习的正确打开方式
在博主认为,对于入门级学习java的最佳学习方法莫过于视频+博客+书籍+总结,前三者博主将淋漓尽致地挥毫于这篇博客文章中,至于总结在于个人,实际上越到后面你会发现学习的最好方式就是阅读参考官方文档其次就是国内的书籍,博客次之,这又是一个层次了,这里暂时不提后面再谈。博主将为各位入门java保驾护航,各位只管冲鸭!!!上天是公平的,只要不辜负时间,时间自然不会辜负你。 何谓学习?博主所理解的学习,它是一个过程,是一个不断累积、不断沉淀、不断总结、善于传达自己的个人见解以及乐于分享的过程。
程序员必须掌握的核心算法有哪些?
由于我之前一直强调数据结构以及算法学习的重要性,所以就有一些读者经常问我,数据结构与算法应该要学习到哪个程度呢?说实话,这个问题我不知道要怎么回答你,主要取决于你想学习到哪些程度,不过针对这个问题,我稍微总结一下我学过的算法知识点,以及我觉得值得学习的算法。这些算法与数据结构的学习大多数是零散的,并没有一本把它们全部覆盖的书籍。下面是我觉得值得学习的一些算法以及数据结构,当然,我也会整理一些看过...
大学四年自学走来,这些私藏的实用工具/学习网站我贡献出来了
大学四年,看课本是不可能一直看课本的了,对于学习,特别是自学,善于搜索网上的一些资源来辅助,还是非常有必要的,下面我就把这几年私藏的各种资源,网站贡献出来给你们。主要有:电子书搜索、实用工具、在线视频学习网站、非视频学习网站、软件下载、面试/求职必备网站。 注意:文中提到的所有资源,文末我都给你整理好了,你们只管拿去,如果觉得不错,转发、分享就是最大的支持了。 一、电子书搜索 对于大部分程序员...
linux系列之常用运维命令整理笔录
本博客记录工作中需要的linux运维命令,大学时候开始接触linux,会一些基本操作,可是都没有整理起来,加上是做开发,不做运维,有些命令忘记了,所以现在整理成博客,当然vi,文件操作等就不介绍了,慢慢积累一些其它拓展的命令,博客不定时更新 free -m 其中:m表示兆,也可以用g,注意都要小写 Men:表示物理内存统计 total:表示物理内存总数(total=used+free) use...
比特币原理详解
一、什么是比特币 比特币是一种电子货币,是一种基于密码学的货币,在2008年11月1日由中本聪发表比特币白皮书,文中提出了一种去中心化的电子记账系统,我们平时的电子现金是银行来记账,因为银行的背后是国家信用。去中心化电子记账系统是参与者共同记账。比特币可以防止主权危机、信用风险。其好处不多做赘述,这一层面介绍的文章很多,本文主要从更深层的技术原理角度进行介绍。 二、问题引入 假设现有4个人...
程序员接私活怎样防止做完了不给钱?
首先跟大家说明一点,我们做 IT 类的外包开发,是非标品开发,所以很有可能在开发过程中会有这样那样的需求修改,而这种需求修改很容易造成扯皮,进而影响到费用支付,甚至出现做完了项目收不到钱的情况。 那么,怎么保证自己的薪酬安全呢? 我们在开工前,一定要做好一些证据方面的准备(也就是“讨薪”的理论依据),这其中最重要的就是需求文档和验收标准。一定要让需求方提供这两个文档资料作为开发的基础。之后开发...
网页实现一个简单的音乐播放器(大佬别看。(⊙﹏⊙))
今天闲着无事,就想写点东西。然后听了下歌,就打算写个播放器。 于是乎用h5 audio的加上js简单的播放器完工了。 演示地点演示 html代码如下` music 这个年纪 七月的风 音乐 ` 然后就是css`*{ margin: 0; padding: 0; text-decoration: none; list-...
Python十大装B语法
Python 是一种代表简单思想的语言,其语法相对简单,很容易上手。不过,如果就此小视 Python 语法的精妙和深邃,那就大错特错了。本文精心筛选了最能展现 Python 语法之精妙的十个知识点,并附上详细的实例代码。如能在实战中融会贯通、灵活使用,必将使代码更为精炼、高效,同时也会极大提升代码B格,使之看上去更老练,读起来更优雅。
数据库优化 - SQL优化
以实际SQL入手,带你一步一步走上SQL优化之路!
2019年11月中国大陆编程语言排行榜
2019年11月2日,我统计了某招聘网站,获得有效程序员招聘数据9万条。针对招聘信息,提取编程语言关键字,并统计如下: 编程语言比例 rank pl_ percentage 1 java 33.62% 2 cpp 16.42% 3 c_sharp 12.82% 4 javascript 12.31% 5 python 7.93% 6 go 7.25% 7 p...
通俗易懂地给女朋友讲:线程池的内部原理
餐盘在灯光的照耀下格外晶莹洁白,女朋友拿起红酒杯轻轻地抿了一小口,对我说:“经常听你说线程池,到底线程池到底是个什么原理?”
《奇巧淫技》系列-python!!每天早上八点自动发送天气预报邮件到QQ邮箱
将代码部署到服务器,每日早上定时获取到天气数据,并发送到邮箱。 也可以说是一个小型人工智障。 知识可以运用在不同地方,不一定非得是天气预报。
经典算法(5)杨辉三角
杨辉三角 是经典算法,这篇博客对它的算法思想进行了讲解,并有完整的代码实现。
英特尔不为人知的 B 面
从 PC 时代至今,众人只知在 CPU、GPU、XPU、制程、工艺等战场中,英特尔在与同行硬件芯片制造商们的竞争中杀出重围,且在不断的成长进化中,成为全球知名的半导体公司。殊不知,在「刚硬」的背后,英特尔「柔性」的软件早已经做到了全方位的支持与支撑,并持续发挥独特的生态价值,推动产业合作共赢。 而对于这一不知人知的 B 面,很多人将其称之为英特尔隐形的翅膀,虽低调,但是影响力却不容小觑。 那么,在...
腾讯算法面试题:64匹马8个跑道需要多少轮才能选出最快的四匹?
昨天,有网友私信我,说去阿里面试,彻底的被打击到了。问了为什么网上大量使用ThreadLocal的源码都会加上private static?他被难住了,因为他从来都没有考虑过这个问题。无独有偶,今天笔者又发现有网友吐槽了一道腾讯的面试题,我们一起来看看。 腾讯算法面试题:64匹马8个跑道需要多少轮才能选出最快的四匹? 在互联网职场论坛,一名程序员发帖求助到。二面腾讯,其中一个算法题:64匹...
面试官:你连RESTful都不知道我怎么敢要你?
干货,2019 RESTful最贱实践
刷了几千道算法题,这些我私藏的刷题网站都在这里了!
遥想当年,机缘巧合入了 ACM 的坑,周边巨擘林立,从此过上了"天天被虐似死狗"的生活… 然而我是谁,我可是死狗中的战斗鸡,智力不够那刷题来凑,开始了夜以继日哼哧哼哧刷题的日子,从此"读题与提交齐飞, AC 与 WA 一色 ",我惊喜的发现被题虐既刺激又有快感,那一刻我泪流满面。这么好的事儿作为一个正直的人绝不能自己独享,经过激烈的颅内斗争,我决定把我私藏的十几个 T 的,阿不,十几个刷题网...
为啥国人偏爱Mybatis,而老外喜欢Hibernate/JPA呢?
关于SQL和ORM的争论,永远都不会终止,我也一直在思考这个问题。昨天又跟群里的小伙伴进行了一番讨论,感触还是有一些,于是就有了今天这篇文。 声明:本文不会下关于Mybatis和JPA两个持久层框架哪个更好这样的结论。只是摆事实,讲道理,所以,请各位看官勿喷。 一、事件起因 关于Mybatis和JPA孰优孰劣的问题,争论已经很多年了。一直也没有结论,毕竟每个人的喜好和习惯是大不相同的。我也看...
白话阿里巴巴Java开发手册高级篇
不久前,阿里巴巴发布了《阿里巴巴Java开发手册》,总结了阿里巴巴内部实际项目开发过程中开发人员应该遵守的研发流程规范,这些流程规范在一定程度上能够保证最终的项目交付质量,通过在时间中总结模式,并推广给广大开发人员,来避免研发人员在实践中容易犯的错误,确保最终在大规模协作的项目中达成既定目标。 无独有偶,笔者去年在公司里负责升级和制定研发流程、设计模板、设计标准、代码标准等规范,并在实际工作中进行...
SQL-小白最佳入门sql查询一
不要偷偷的查询我的个人资料,即使你再喜欢我,也不要这样,真的不好;
项目中的if else太多了,该怎么重构?
介绍 最近跟着公司的大佬开发了一款IM系统,类似QQ和微信哈,就是聊天软件。我们有一部分业务逻辑是这样的 if (msgType = "文本") { // dosomething } else if(msgType = "图片") { // doshomething } else if(msgType = "视频") { // doshomething } else { // doshom...
Nginx 原理和架构
Nginx 是一个免费的,开源的,高性能的 HTTP 服务器和反向代理,以及 IMAP / POP3 代理服务器。Nginx 以其高性能,稳定性,丰富的功能,简单的配置和低资源消耗而闻名。 Nginx 的整体架构 Nginx 里有一个 master 进程和多个 worker 进程。master 进程并不处理网络请求,主要负责调度工作进程:加载配置、启动工作进程及非停升级。worker 进程负责处...
【图解经典算法题】如何用一行代码解决约瑟夫环问题
约瑟夫环问题算是很经典的题了,估计大家都听说过,然后我就在一次笔试中遇到了,下面我就用 3 种方法来详细讲解一下这道题,最后一种方法学了之后保证让你可以让你装逼。 问题描述:编号为 1-N 的 N 个士兵围坐在一起形成一个圆圈,从编号为 1 的士兵开始依次报数(1,2,3…这样依次报),数到 m 的 士兵会被杀死出列,之后的士兵再从 1 开始报数。直到最后剩下一士兵,求这个士兵的编号。 1、方...
吐血推荐珍藏的Visual Studio Code插件
作为一名Java工程师,由于工作需要,最近一个月一直在写NodeJS,这种经历可以说是一部辛酸史了。好在有神器Visual Studio Code陪伴,让我的这段经历没有更加困难。眼看这段经历要告一段落了,今天就来给大家分享一下我常用的一些VSC的插件。 VSC的插件安装方法很简单,只需要点击左侧最下方的插件栏选项,然后就可以搜索你想要的插件了。 下面我们进入正题 Material Theme ...
如何防止抄袭PCB电路板
目录 1、抄板是什么 2、抄板是否属于侵权 3、如何防止抄板 1、抄板是什么 抄板也叫克隆或仿制,是对设计出来的PCB板进行反向技术研究;目前全新的定义:从狭义上来说,抄板仅指对电子产品电路板PCB文件的提取还原和利用文件进行电路板克隆的过程;从广义上来说,抄板不仅包括对电路板文件提取、电路板克隆、电路板仿制等技术过程,而且包括对电路板文件进行修改(即改板)、对电子产品外形模具进行三维...
“狗屁不通文章生成器”登顶GitHub热榜,分分钟写出万字形式主义大作
一、垃圾文字生成器介绍 最近在浏览GitHub的时候,发现了这样一个骨骼清奇的雷人项目,而且热度还特别高。 项目中文名:狗屁不通文章生成器 项目英文名:BullshitGenerator 根据作者的介绍,他是偶尔需要一些中文文字用于GUI开发时测试文本渲染,因此开发了这个废话生成器。但由于生成的废话实在是太过富于哲理,所以最近已经被小伙伴们给玩坏了。 他的文风可能是这样的: 你发现,...
程序员:我终于知道post和get的区别
post 和 get 的区别是一个老生常谈的话题,然而随着不断的学习,对于以前的认识有很多误区,所以还是需要不断地总结的,学而时习之,不亦说乎。
《程序人生》系列-这个程序员只用了20行代码就拿了冠军
你知道的越多,你不知道的越多 点赞再看,养成习惯GitHub上已经开源https://github.com/JavaFamily,有一线大厂面试点脑图,欢迎Star和完善 前言 这一期不算《吊打面试官》系列的,所有没前言我直接开始。 絮叨 本来应该是没有这期的,看过我上期的小伙伴应该是知道的嘛,双十一比较忙嘛,要值班又要去帮忙拍摄年会的视频素材,还得搞个程序员一天的Vlog,还要写BU...
加快推动区块链技术和产业创新发展,2019可信区块链峰会在京召开
11月8日,由中国信息通信研究院、中国通信标准化协会、中国互联网协会、可信区块链推进计划联合主办,科技行者协办的2019可信区块链峰会将在北京悠唐皇冠假日酒店开幕。   区块链技术被认为是继蒸汽机、电力、互联网之后,下一代颠覆性的核心技术。如果说蒸汽机释放了人类的生产力,电力解决了人类基本的生活需求,互联网彻底改变了信息传递的方式,区块链作为构造信任的技术有重要的价值。   1...
Python 植物大战僵尸代码实现(2):植物卡片选择和种植
这篇文章要介绍的是: - 上方植物卡片栏的实现。 - 点击植物卡片,鼠标切换为植物图片。 - 鼠标移动时,判断当前在哪个方格中,并显示半透明的植物作为提示。
Java世界最常用的工具类库
Apache Commons Apache Commons有很多子项目 Google Guava 参考博客
程序员把地府后台管理系统做出来了,还有3.0版本!12月7号最新消息:已在开发中有github地址
第一幕:缘起 听说阎王爷要做个生死簿后台管理系统,我们派去了一个程序员…… 996程序员做的梦: 第一场:团队招募 为了应对地府管理危机,阎王打算找“人”开发一套地府后台管理系统,于是就在地府总经办群中发了项目需求。 话说还是中国电信的信号好,地府都是满格,哈哈!!! 经常会有外行朋友问:看某网站做的不错,功能也简单,你帮忙做一下? 而这次,面对这样的需求,这个程序员...
网易云6亿用户音乐推荐算法
网易云音乐是音乐爱好者的集聚地,云音乐推荐系统致力于通过 AI 算法的落地,实现用户千人千面的个性化推荐,为用户带来不一样的听歌体验。 本次分享重点介绍 AI 算法在音乐推荐中的应用实践,以及在算法落地过程中遇到的挑战和解决方案。 将从如下两个部分展开: AI算法在音乐推荐中的应用 音乐场景下的 AI 思考 从 2013 年 4 月正式上线至今,网易云音乐平台持续提供着:乐屏社区、UGC...
【技巧总结】位运算装逼指南
位算法的效率有多快我就不说,不信你可以去用 10 亿个数据模拟一下,今天给大家讲一讲位运算的一些经典例子。不过,最重要的不是看懂了这些例子就好,而是要在以后多去运用位运算这些技巧,当然,采用位运算,也是可以装逼的,不信,你往下看。我会从最简单的讲起,一道比一道难度递增,不过居然是讲技巧,那么也不会太难,相信你分分钟看懂。 判断奇偶数 判断一个数是基于还是偶数,相信很多人都做过,一般的做法的代码如下...
为什么要学数据结构?
一、前言 在可视化化程序设计的今天,借助于集成开发环境可以很快地生成程序,程序设计不再是计算机专业人员的专利。很多人认为,只要掌握几种开发工具就可以成为编程高手,其实,这是一种误解。要想成为一个专业的开发人员,至少需要以下三个条件: 1) 能够熟练地选择和设计各种数据结构和算法 2) 至少要能够熟练地掌握一门程序设计语言 3) 熟知所涉及的相关应用领域的知识 其中,后两个条件比较容易实现,而第一个...
金山办公上市,雷军心愿了却!
作者 | 胡巍巍 出品 | CSDN(ID:CSDNnews) 11月17日,大周末的,雷军微博发了个重磅消息: “明天将是里程碑式的一天,金山办公终于成功在科创板挂牌上市了! 从1988年金山创办到今天,WPS走了整整31年。 从1999年以金山办公为主体准备上市算起,这一天,我们等了20年。 WPS和金山的历程,这是一个坚持梦想并最终取得胜利的励志故事。期待大家的祝福!”...
8年经验面试官详解 Java 面试秘诀
作者 |胡书敏 责编 | 刘静 出品 | CSDN(ID:CSDNnews) 本人目前在一家知名外企担任架构师,而且最近八年来,在多家外企和互联网公司担任Java技术面试官,前后累计面试了有两三百位候选人。在本文里,就将结合本人的面试经验,针对Java初学者、Java初级开发和Java开发,给出若干准备简历和准备面试的建议。 Java程序员准备和投递简历的实...
面试官如何考察你的思维方式?
1.两种思维方式在求职面试中,经常会考察这种问题:北京有多少量特斯拉汽车?某胡同口的煎饼摊一年能卖出多少个煎饼?深圳有多少个产品经理?一辆公交车里能装下多少个乒乓球?一个正常成年人有多少根头发?这类估算问题,被称为费米问题,是以科学家费米命名的。为什么面试会问这种问题呢?这类问题能把两类人清楚地区分出来。一类是具有文科思维的人,擅长赞叹和模糊想象,它主要依靠的是人的第一反应和直觉,比如小孩...
17张图带你解析红黑树的原理!保证你能看懂!
二叉查找树 由于红黑树本质上就是一棵二叉查找树,所以在了解红黑树之前,咱们先来看下二叉查找树。 二叉查找树(Binary Search Tree),也称有序二叉树(ordered binary tree),排序二叉树(sorted binary tree),是指一棵空树或者具有下列性质的二叉树: 若任意结点的左子树不空,则左子树上所有结点的值均小于它的根结点的值; 若任意结点的...
相关热词 c# 二进制截断字符串 c#实现窗体设计器 c#检测是否为微信 c# plc s1200 c#里氏转换原则 c# 主界面 c# do loop c#存为组套 模板 c# 停掉协程 c# rgb 读取图片
立即提问