'Datasets' object has no attribute 'train_step'

```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

BATAH_SIZE = 200
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"

def backward(mnist):

    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.arg_max(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATAH_SIZE,
                                               LEARNING_RATE_DECAY, staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            xs, ys = mnist.train_step.next_batch(BATAH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()
```

Running the program produces the following error:

File "C:/Users/98382/PycharmProjects/minst/mnist_backward.py", line 54, in
main()
File "C:/Users/98382/PycharmProjects/minst/mnist_backward.py", line 51, in main
backward(mnist)
File "C:/Users/98382/PycharmProjects/minst/mnist_backward.py", line 43, in backward
xs, ys = mnist.train_step.next_batch(BATAH_SIZE)
AttributeError: 'Datasets' object has no attribute 'train_step'

2 answers

Change `xs, ys = mnist.train_step.next_batch(BATAH_SIZE)` to `xs, ys = mnist.train.next_batch(BATAH_SIZE)` and the program will run correctly.
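For reference, a minimal sketch of the corrected loop (only the `next_batch` call changes; everything else in the question's script is assumed to stay as posted):

```
for i in range(STEPS):
    # 'Datasets' exposes a 'train' member (a DataSet with next_batch());
    # 'train_step' is the optimizer op in the graph, not an attribute of the input data object.
    xs, ys = mnist.train.next_batch(BATAH_SIZE)
    _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
```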

You can also inspect the attributes of `mnist`: it has no `train_step` attribute, but it does have members (such as `train`) whose attributes include `next_batch()`. Use one of those and the program will work.
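A quick way to verify this yourself, assuming the TF 1.x `input_data` helper used in the question:

```
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("./data/", one_hot=True)

# read_data_sets returns a namedtuple-like 'Datasets' object whose members are
# 'train', 'validation' and 'test'; each of those is a DataSet with next_batch().
print([attr for attr in dir(mnist) if not attr.startswith('_')])
print(hasattr(mnist, 'train_step'))        # False -> the AttributeError in the question
print(hasattr(mnist.train, 'next_batch'))  # True  -> use mnist.train.next_batch(...)
```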
