TensorFlow autoencoder placeholder error

DSAE.py:

import numpy as np
import tensorflow as tf


def xavier_init(fan_in, fan_out, constant=1):
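    # Xavier/Glorot uniform initialization: bound = constant * sqrt(6 / (fan_in + fan_out))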
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high,
                             dtype=tf.float32)


class AdditiveGaussionNoiseAutoencoder(object):
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.relu,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)  # scalar noise coefficient (no fixed shape), fed at run time
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        self.x = tf.placeholder(tf.float32, [None, self.n_input])  # any batch size, exactly n_input columns per row

        # note: the noise term uses the Python float `scale`, not the self.scale placeholder
        self.hidden = self.transfer(tf.add(tf.matmul(
            self.x + scale * tf.random_normal((n_input,)),
            self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden,
                                               self.weights['w2']), self.weights['b2'])


        self.cost = tf.sqrt(tf.reduce_mean(tf.pow(tf.subtract(
            self.reconstruction, self.x), 2.0)))  # RMSE reconstruction loss
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        return self.sess.run(self.hidden,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        if hidden is None:
            hidden = np.random.normal(size=self.weights['b1'])
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})

    def getweights(self):
        return self.sess.run(self.weights['w1'])

    def getbiases(self):
        return self.sess.run(self.weights['b1'])


DLmain.py:

import numpy as np
import tensorflow as tf
from DSAE import AdditiveGaussionNoiseAutoencoder
import xlrd
import sklearn.preprocessing as prep

# Data loading; the .xls files can be converted to CSV for easier handling (see ConvertData)
train_input = "/Users/Patrick/Desktop/traffic_data/train_500010092_input.xls"
train_output = "/Users/Patrick/Desktop/traffic_data/train_500010092_output.xls"
test_input = "/Users/Patrick/Desktop/traffic_data/test_500010092_input.xls"
test_output = "/Users/Patrick/Desktop/traffic_data/test_500010092_output.xls"
book_train_input = xlrd.open_workbook(train_input, encoding_override='utf-8')
book_train_output = xlrd.open_workbook(train_output, encoding_override='utf-8')
book_test_input = xlrd.open_workbook(test_input, encoding_override='utf-8')
book_test_output = xlrd.open_workbook(test_output, encoding_override='utf-8')
sheet_train_input = book_train_input.sheet_by_index(0)
sheet_train_output = book_train_output.sheet_by_index(0)
sheet_test_input = book_test_input.sheet_by_index(0)
sheet_test_output = book_test_output.sheet_by_index(0)
data_train_input = np.asarray([sheet_train_input.row_values(i)
                             for i in range(2, sheet_train_input.nrows)])
data_train_output = np.asarray(([sheet_train_output.row_values(i)
                             for i in range(2, sheet_train_output.ncols)]))
data_test_input = np.asarray([sheet_test_input.row_values(i)
                            for i in range(2, sheet_test_input.nrows)])
data_test_output = np.asarray(([sheet_test_output.row_values(i)
                              for i in range(2, sheet_test_output.ncols)]))


def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


X_train, X_test = standard_scale(data_train_input, data_test_input)


def get_block_form_data(data, batch_size, k):
    #start_index =0
    start_index = k * batch_size
    return data[start_index:(start_index+batch_size)]


training_epochs = 20
batch_size = 288
n_samples = sheet_test_output.nrows
display_step = 1
stack_size = 3
hidden_size = [10, 8, 10]


sdae = []
for i in range(stack_size):
    if i == 0:
        ae = AdditiveGaussionNoiseAutoencoder(n_input=12,
                                              n_hidden=hidden_size[i],
                                              transfer_function=tf.nn.relu,
                                              optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
                                              scale=0.01)
        ae._initialize_weights()
        sdae.append(ae)
    else:
        ae = AdditiveGaussionNoiseAutoencoder(n_input=hidden_size[i-1],
                                              n_hidden=hidden_size[i],
                                              transfer_function=tf.nn.relu,
                                              optimizer=tf.train.AdamOptimizer(learning_rate=0.01),
                                              scale=0.01)
        ae._initialize_weights()
        sdae.append(ae)
W = []
b = []
hidden_feacture = []
X_train = np.array([0])
for j in range(stack_size):
    if j == 0:
        X_train = data_train_input
        X_test = data_test_input
    else:
        X_train_pre = X_train
        X_train = sdae[j-1].transform(X_train_pre)
        print(X_train.shape)
        hidden_feacture.append(X_train)

    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            batch_xs = get_block_form_data(X_train, batch_size, i)

            cost = sdae[j].partial_fit(batch_xs)
            avg_cost += cost / n_samples * batch_size

        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=",
                  "{:.9f}".format(avg_cost))

    weight = sdae[j].getweights()
    W.append(weight)
    print(np.shape(W))
    b.append(sdae[j].getbiases())
    print(np.shape(b))

Then it reports the following error:

  File "/Applications/PyCharm.app/Contents/helpers/pydev/pydev_run_in_console.py", line 53, in run_file
    pydev_imports.execfile(file, globals, locals)  # execute the script
  File "/Applications/PyCharm.app/Contents/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
    exec(compile(contents+"\n", file, 'exec'), glob, loc)
  File "/Users/Patrick/PycharmProjects/DSAE-SVM/DLmain.py", line 80, in <module>
    X_train = sdae[j-1].transform(X_train_pre)
  File "/Users/Patrick/PycharmProjects/DSAE-SVM/DSAE.py", line 70, in transform
    feed_dict={self.x: X, self.scale: self.training_scale})
  File "/Users/Patrick/anaconda3/envs/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py", line 905, in run
    run_metadata_ptr)
  File "/Users/Patrick/anaconda3/envs/tensorflow/lib/python3.4/site-packages/tensorflow/python/client/session.py", line 1113, in _run
    str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (18143, 3) for Tensor 'Placeholder_1:0', which has shape '(?, 12)'
PyDev console: starting.
Python 3.4.5 |Continuum Analytics, Inc.| (default, Jul  2 2016, 17:47:57) 
[GCC 4.2.1 Compatible Apple LLVM 4.2 (clang-425.0.28)] on darwin

I really don't know how to change the placeholder's shape. Could someone please explain?

1 answer

tf.placeholder(tf.float32, [x, y])
x and y are your shape.

This line of your code:
self.scale = tf.placeholder(tf.float32)

You can try changing the shape on this line and see. I'm also just learning, so I'm not sure whether this is right. Sorry if it's wrong.
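
For reference, here is a minimal sketch of the rule this error enforces: a value fed into a placeholder must match every dimension that was fixed when the placeholder was declared, and only dimensions given as None are free. The traceback above says an array with 3 columns was fed into the first autoencoder's self.x, which was declared as [None, 12] via n_input=12, so the width of the data and n_input have to agree. The snippet below only illustrates that rule with made-up arrays; it is not a verified fix for this particular script.

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 12])  # any number of rows, exactly 12 columns

with tf.Session() as sess:
    ok = np.zeros((5, 12), dtype=np.float32)
    print(sess.run(tf.shape(x), feed_dict={x: ok}))   # prints [ 5 12]

    bad = np.zeros((5, 3), dtype=np.float32)
    try:
        sess.run(tf.shape(x), feed_dict={x: bad})     # 3 columns != 12 columns
    except ValueError as e:
        print(e)  # "Cannot feed value of shape (5, 3) for Tensor ... which has shape '(?, 12)'"

# One possible direction (an assumption, not a confirmed fix): derive the first
# autoencoder's n_input from the data itself instead of hard-coding 12,
#     n_first = data_train_input.shape[1]
#     ae = AdditiveGaussionNoiseAutoencoder(n_input=n_first, n_hidden=hidden_size[0], ...)
# and make sure each later autoencoder is only ever fed the previous transform()
# output, whose column count is hidden_size[i-1].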
