'Datasets' object has no attribute 'train_step'

```
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

BATAH_SIZE = 200
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 50000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"


def backward(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OUTPUT_NODE])
    y = mnist_forward.forward(x, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.arg_max(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,
                                               mnist.train.num_examples / BATAH_SIZE,
                                               LEARNING_RATE_DECAY, staircase=True)

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        for i in range(STEPS):
            xs, ys = mnist.train_step.next_batch(BATAH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)


def main():
    mnist = input_data.read_data_sets("./data/", one_hot=True)
    backward(mnist)


if __name__ == '__main__':
    main()
```

Running the program produces this error:

File "C:/Users/98382/PycharmProjects/minst/mnist_backward.py", line 54, in
main()
File "C:/Users/98382/PycharmProjects/minst/mnist_backward.py", line 51, in main
backward(mnist)
File "C:/Users/98382/PycharmProjects/minst/mnist_backward.py", line 43, in backward
xs, ys = mnist.train_step.next_batch(BATAH_SIZE)
AttributeError: 'Datasets' object has no attribute 'train_step'

2 answers

Change `xs, ys = mnist.train_step.next_batch(BATAH_SIZE)` to `xs, ys = mnist.train.next_batch(BATAH_SIZE)` and the program will run normally.
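For reference, a minimal sketch of the fix, assuming TensorFlow 1.x with the bundled `tensorflow.examples.tutorials.mnist` module and the same `./data/` directory as in the question:

```
from tensorflow.examples.tutorials.mnist import input_data

# read_data_sets returns a Datasets namedtuple whose fields are the three
# splits: mnist.train, mnist.validation and mnist.test.
mnist = input_data.read_data_sets("./data/", one_hot=True)

# next_batch() lives on a split, not on the Datasets object itself,
# so the fetch inside the training loop should read:
xs, ys = mnist.train.next_batch(200)   # 200 is BATAH_SIZE in the question
print(xs.shape, ys.shape)              # expected: (200, 784) (200, 10)
```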

You can also inspect the attributes of the `mnist` object: it has no `train_step` attribute. Look for the attribute that does provide `next_batch()` (that is `mnist.train`), use it, and the program will work.
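As a quick way to follow that advice, one can list the public attributes and check for `next_batch` directly; this sketch reuses the `mnist` object loaded in the snippet above:

```
# List the public attributes of the Datasets namedtuple; 'train_step' is not
# among them, but 'train', 'validation' and 'test' are.
print([name for name in dir(mnist) if not name.startswith('_')])

print(hasattr(mnist, 'train_step'))        # False -> the AttributeError above
print(hasattr(mnist.train, 'next_batch'))  # True  -> this is the split to use
```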
