cifar-10分类问题，同样的模型结构以及损失函数还有学习率参数等超参数，分别用TensorFlow和keras实现。
20个epochs后在测试集上进行预测，准确率总是差好几个百分点，不知道问题出在哪里？代码如下：

``````import tensorflow as tf
import numpy as np
import pickle as pk

tf.reset_default_graph()

batch_size = 64
test_size = 10000
img_size = 32
num_classes = 10
training_epochs = 10
test_size=200

###############################################################################
def unpickle(filename):
    """Load one pickled CIFAR-10 batch file.

    Returns the batch as a dict (keys include 'data' and 'labels').
    """
    with open(filename, 'rb') as f:
        # BUG FIX: the original returned an undefined name `d` — the pickle
        # was never actually loaded.  The official CIFAR-10 batches were
        # pickled under Python 2, so latin1 decoding is required on Python 3.
        d = pk.load(f, encoding='latin1')
    return d

def onehot(labels):
    """One-hot encode integer class labels into an (N, C) float matrix.

    C is inferred as max(labels) + 1, so every class up to the largest
    label present gets a column.
    """
    labels = np.asarray(labels)
    num_cols = labels.max() + 1
    encoded = np.zeros((labels.size, num_cols))
    encoded[np.arange(labels.size), labels] = 1
    return encoded

# ---- training set: the five pickled CIFAR-10 batches, concatenated --------
train_batches = [unpickle('data_batch_%d' % i) for i in range(1, 6)]
X_train = np.concatenate([batch['data'] for batch in train_batches], axis=0) / 255.0
y_train = onehot(np.concatenate([batch['labels'] for batch in train_batches], axis=0))
# ---- test set -------------------------------------------------------------
test = unpickle('test_batch')
X_test = test['data'] / 255.0
y_test = onehot(test['labels'])
# free the raw batch dicts; only the numpy arrays are needed from here on
del test, train_batches
###############################################################################

def init_bias(shape):
    """Zero-initialised trainable bias variable."""
    return tf.Variable(tf.constant(0.0, shape=shape))

# NOTE(review): Keras layers default to glorot_uniform initialisation, while
# these weights use random_normal with hand-picked stddevs — a likely
# contributor to the TF-vs-Keras accuracy gap.  Confirm before relying on it.
# conv kernel: 5x5 window, 3 input channels -> 32 feature maps
w = tf.Variable(tf.random_normal([5, 5, 3, 32], stddev=0.01))
# fully connected: flattened 16x16x32 feature map -> 512 hidden units
w_c = tf.Variable(tf.random_normal([32 * 16 * 16, 512], stddev=0.1))
# output layer: 512 hidden units -> 10 class logits
w_o = tf.Variable(tf.random_normal([512, num_classes], stddev=0.1))

b = init_bias([32])
b_c = init_bias([512])
b_o = init_bias([10])

def model(X, w, w_c, w_o, p_keep_conv, p_keep_hidden, b, b_c, b_o):
    """Forward pass: conv(5x5,32) -> ReLU -> 2x2 max-pool -> dropout
    -> FC(512) -> ReLU -> dropout -> linear logits over 10 classes.

    p_keep_conv / p_keep_hidden are dropout *keep* probabilities
    (feed 1.0 at evaluation time).  Returns raw logits; the softmax is
    applied inside the loss.
    """
    conv1 = tf.nn.conv2d(X, w, strides=[1, 1, 1, 1], padding='SAME')  # 32x32x32
    # BUG FIX: the conv bias `b` was created and passed in but never applied,
    # unlike a Keras Conv2D layer (use_bias=True by default) — one concrete
    # difference between the two implementations being compared.
    conv1 = tf.nn.relu(tf.nn.bias_add(conv1, b))
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')      # 16x16x32
    conv1 = tf.nn.dropout(conv1, p_keep_conv)

    # flatten to (batch, 16*16*32) and run the fully-connected hidden layer
    FC_layer = tf.reshape(conv1, [-1, 32 * 16 * 16])
    out_layer = tf.nn.relu(tf.matmul(FC_layer, w_c) + b_c)
    out_layer = tf.nn.dropout(out_layer, p_keep_hidden)

    # raw logits
    return tf.matmul(out_layer, w_o) + b_o

trX, trY, teX, teY = X_train,y_train,X_test,y_test

trX = trX.reshape(-1, img_size, img_size, 3)
teX = teX.reshape(-1, img_size, img_size, 3)

X = tf.placeholder("float", [None, img_size, img_size, 3])
Y = tf.placeholder("float", [None, num_classes])
p_keep_conv = tf.placeholder("float")
p_keep_hidden = tf.placeholder("float")

py_x = model(X, w, w_c,w_o, p_keep_conv, p_keep_hidden,b,b_c,b_o)

Y_ = tf.nn.softmax_cross_entropy_with_logits_v2(logits=py_x, labels=Y)
cost = tf.reduce_mean(Y_)
optimizer  = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
predict_op = tf.argmax(py_x, 1)

with tf.Session() as sess:

tf.global_variables_initializer().run()
for i in range(training_epochs):
training_batch = zip(range(0, len(trX),batch_size),range(batch_size, len(trX)+1,batch_size))

perm=np.arange(len(trX))
np.random.shuffle(perm)
trX=trX[perm]
trY=trY[perm]

for start, end in training_batch:
sess.run(optimizer, feed_dict={X: trX[start:end],Y: trY[start:end],p_keep_conv:0.75,p_keep_hidden: 0.5})

test_batch = zip(range(0, len(teX),test_size),range(test_size, len(teX)+1,test_size))

accuracyResult=0
for start, end in test_batch:
accuracyResult=accuracyResult+sum(np.argmax(teY[start:end], axis=1) ==sess.run(predict_op, feed_dict={X: teX[start:end],Y: teY[start:end],p_keep_conv: 1,p_keep_hidden: 1}))
print(i, accuracyResult/10000)
``````

``````from keras import initializers
from keras.datasets import cifar10
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.optimizers import SGD, Adam, RMSprop
#import matplotlib.pyplot as plt

# CIFAR_10 is a set of 60K images 32x32 pixels on 3 channels
IMG_CHANNELS = 3
IMG_ROWS = 32
IMG_COLS = 32
#constant
BATCH_SIZE = 64
NB_EPOCH = 10
NB_CLASSES = 10
VERBOSE = 1
VALIDATION_SPLIT = 0
OPTIM = RMSprop()
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
#print('X_train shape:', X_train.shape)
#print(X_train.shape[0], 'train samples')
#print(X_test.shape[0], 'test samples')

# convert to categorical
Y_train = np_utils.to_categorical(y_train, NB_CLASSES)
Y_test = np_utils.to_categorical(y_test, NB_CLASSES)
# float and normalization
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# network
model = Sequential()
model.summary()

# train
model.compile(loss='categorical_crossentropy', optimizer=OPTIM,metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=BATCH_SIZE,epochs=NB_EPOCH, validation_split=VALIDATION_SPLIT,verbose=VERBOSE)
score = model.evaluate(X_test, Y_test,batch_size=200, verbose=VERBOSE)
print("Test score:", score[0])
print('Test accuracy:', score[1])
``````

spyder import TensorFlow 或者 keras时不报错，程序终止。

spyder import TensorFlow 或者 keras时不报错，程序终止。 后面所有结果都没有出来，求助！！如何解决！！！

Tensorflow代码转到Keras

TensorFlow的Keras如何使用Dataset作为数据输入？

tensorflow和keras一设置激活函数好像就是会默认设置一整层所有节点都会是同一个激活函数，请问要如何实现同一层不同节点有不同激活函数？

tensorflow环境下只要import keras 就会出现python已停止运行？

python小白在写代码的时候发现只要import keras就会出现python停止运行的情况，目前tensorflow版本1.2.1,keras版本2.1.1，防火墙关了也还是这样，具体代码和问题信息如下，请大神赐教。 ``` # -*- coding: utf-8 -*- import numpy as np from scipy.io import loadmat, savemat from keras.utils import np_utils 问题事件名称: BEX64 应用程序名: pythonw.exe 应用程序版本: 3.6.2150.1013 应用程序时间戳: 5970e8ca 故障模块名称: StackHash_1dc2 故障模块版本: 0.0.0.0 故障模块时间戳: 00000000 异常偏移: 0000000000000000 异常代码: c0000005 异常数据: 0000000000000008 OS 版本: 6.1.7601.2.1.0.256.1 区域设置 ID: 2052 其他信息 1: 1dc2 其他信息 2: 1dc22fb1de37d348f27e54dbb5278e7d 其他信息 3: eae3 其他信息 4: eae36a4b5ffb27c9d33117f4125a75c2 ```

pytorch lstmcell方法转化成keras或者tensorflow

pytorch self.att_lstm = nn.LSTMCell(1536, 512) self.lang_lstm = nn.LSTMCell(1024, 512) 请问上面的如何转成同等的keras或者tensorflow

Python Tensorflow中dense问题

tf.layers.dense中units的参数设定依据什么规则？是维数越大越精确吗？刚刚开始学，希望能细讲下谢谢

tensorflow训练完模型直接测试和导入模型进行测试的结果不同，一个很好，一个略差，这是为什么？

keras薛定谔的训练结果问题

tf.keras 关于 胶囊网络 capsule的问题

``` from tensorflow.keras import backend as K from tensorflow.keras.layers import Layer from tensorflow.keras import activations from tensorflow.keras import utils from tensorflow.keras.models import Model from tensorflow.keras.layers import * from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import TensorBoard import mnist import tensorflow batch_size = 128 num_classes = 10 epochs = 20 """ 压缩函数,我们使用0.5替代hinton论文中的1,如果是1，所有的向量的范数都将被缩小。 如果是0.5，小于0.5的范数将缩小，大于0.5的将被放大 """ def squash(x, axis=-1): s_quared_norm = K.sum(K.square(x), axis, keepdims=True) + K.epsilon() scale = K.sqrt(s_quared_norm) / (0.5 + s_quared_norm) result = scale * x return result # 定义我们自己的softmax函数，而不是K.softmax.因为K.softmax不能指定轴 def softmax(x, axis=-1): ex = K.exp(x - K.max(x, axis=axis, keepdims=True)) result = ex / K.sum(ex, axis=axis, keepdims=True) return result # 定义边缘损失，输入y_true, p_pred，返回分数，传入即可fit时候即可 def margin_loss(y_true, y_pred): lamb, margin = 0.5, 0.1 result = K.sum(y_true * K.square(K.relu(1 - margin -y_pred)) + lamb * (1-y_true) * K.square(K.relu(y_pred - margin)), axis=-1) return result class Capsule(Layer): """编写自己的Keras层需要重写3个方法以及初始化方法 1.build(input_shape):这是你定义权重的地方。 这个方法必须设self.built = True，可以通过调用super([Layer], self).build()完成。 2.call(x):这里是编写层的功能逻辑的地方。 你只需要关注传入call的第一个参数：输入张量，除非你希望你的层支持masking。 3.compute_output_shape(input_shape): 如果你的层更改了输入张量的形状，你应该在这里定义形状变化的逻辑，这让Keras能够自动推断各层的形状。 4.初始化方法,你的神经层需要接受的参数 """ def __init__(self, num_capsule, dim_capsule, routings=3, share_weights=True, activation='squash', **kwargs): super(Capsule, self).__init__(**kwargs) # Capsule继承**kwargs参数 self.num_capsule = num_capsule self.dim_capsule = dim_capsule self.routings = routings self.share_weights = share_weights if activation == 'squash': self.activation = squash else: self.activation = activation.get(activation) # 得到激活函数 # 定义权重 def build(self, input_shape): input_dim_capsule = input_shape[-1] if self.share_weights: # 自定义权重 self.kernel = 
self.add_weight( name='capsule_kernel', shape=(1, input_dim_capsule, self.num_capsule * self.dim_capsule), initializer='glorot_uniform', trainable=True) else: input_num_capsule = input_shape[-2] self.kernel = self.add_weight( name='capsule_kernel', shape=(input_num_capsule, input_dim_capsule, self.num_capsule * self.dim_capsule), initializer='glorot_uniform', trainable=True) super(Capsule, self).build(input_shape) # 必须继承Layer的build方法 # 层的功能逻辑(核心) def call(self, inputs): if self.share_weights: hat_inputs = K.conv1d(inputs, self.kernel) else: hat_inputs = K.local_conv1d(inputs, self.kernel, [1], [1]) batch_size = K.shape(inputs)[0] input_num_capsule = K.shape(inputs)[1] hat_inputs = K.reshape(hat_inputs, (batch_size, input_num_capsule, self.num_capsule, self.dim_capsule)) hat_inputs = K.permute_dimensions(hat_inputs, (0, 2, 1, 3)) b = K.zeros_like(hat_inputs[:, :, :, 0]) for i in range(self.routings): c = softmax(b, 1) o = self.activation(K.batch_dot(c, hat_inputs, [2, 2])) if K.backend() == 'theano': o = K.sum(o, axis=1) if i < self.routings-1: b += K.batch_dot(o, hat_inputs, [2, 3]) if K.backend() == 'theano': o = K.sum(o, axis=1) return o def compute_output_shape(self, input_shape): # 自动推断shape return (None, self.num_capsule, self.dim_capsule) def MODEL(): input_image = Input(shape=(32, 32, 3)) x = Conv2D(64, (3, 3), activation='relu')(input_image) x = Conv2D(64, (3, 3), activation='relu')(x) x = AveragePooling2D((2, 2))(x) x = Conv2D(128, (3, 3), activation='relu')(x) x = Conv2D(128, (3, 3), activation='relu')(x) """ 现在我们将它转换为(batch_size, input_num_capsule, input_dim_capsule)，然后连接一个胶囊神经层。模型的最后输出是10个维度为16的胶囊网络的长度 """ x = Reshape((-1, 128))(x) # (None, 100, 128) 相当于前一层胶囊(None, input_num, input_dim) capsule = Capsule(num_capsule=10, dim_capsule=16, routings=3, share_weights=True)(x) # capsule-（None,10, 16) output = Lambda(lambda z: K.sqrt(K.sum(K.square(z), axis=2)))(capsule) # 最后输出变成了10个概率值 model = Model(inputs=input_image, output=output) return model if __name__ 
== '__main__': # 加载数据 (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes) y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes) # 加载模型 model = MODEL() model.compile(loss=margin_loss, optimizer='adam', metrics=['accuracy']) model.summary() tfck = TensorBoard(log_dir='capsule') # 训练 data_augmentation = True if not data_augmentation: print('Not using data augmentation.') model.fit( x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), callbacks=[tfck], shuffle=True) else: print('Using real-time data augmentation.') # This will do preprocessing and realtime data augmentation: datagen = ImageDataGenerator( featurewise_center=False, # set input mean to 0 over the dataset samplewise_center=False, # set each sample mean to 0 featurewise_std_normalization=False, # divide inputs by dataset std samplewise_std_normalization=False, # divide each input by its std zca_whitening=False, # apply ZCA whitening rotation_range=0, # randomly rotate images in 0 to 180 degrees width_shift_range=0.1, # randomly shift images horizontally height_shift_range=0.1, # randomly shift images vertically horizontal_flip=True, # randomly flip images vertical_flip=False) # randomly flip images # Compute quantities required for feature-wise normalization # (std, mean, and principal components if ZCA whitening is applied). datagen.fit(x_train) # Fit the model on the batches generated by datagen.flow(). model.fit_generator( datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(x_test, y_test), callbacks=[tfck], workers=4) ``` 以上为代码 运行后出现该问题 ![图片说明](https://img-ask.csdn.net/upload/201902/26/1551184741_476774.png) ![图片说明](https://img-ask.csdn.net/upload/201902/26/1551184734_845838.png) 用官方的胶囊网络keras实现更改为tf下的keras实现仍出现该错误。

<div class="post-text" itemprop="text"> <p>I am a newbie of <code>TensorFlow</code> in <code>Go</code>.</p> <p>There are some doubts during my first traing demo. I just find one optimizer in Go's <code>wrappers.go</code>. </p> <p>But i learn the demos of python,they has serveral optimizers. Like </p> <pre><code>GradientDescentOptimizer AdagradOptimizer AdagradDAOptimizer MomentumOptimizer AdamOptimizer FtrlOptimizer RMSPropOptimizer </code></pre> <p>The similar prefix of func like <code>ResourceApply... GradientDescent Adagrad AdagradDA Momentum Adam Ftrl RMSProp</code>.</p> <p>And they return a option.I don't know what are their purpose. I cant find the relation of them and optimizer.</p> <p>And how can i make a train in <code>Go</code> by <code>TensorFlow</code>.</p> <p>What should I should use like python's <code>tf.Variable</code> in <code>Go</code>?</p> </div>

tensorflow2.0如何实现参数共享

ubuntu下调用keras报错：No module named 'error'

cuda9.0和TensorFlow1.8.0已安装 import tensorflow也没有问题，就是再import keras出错，求大神解答! 报错如下： Using TensorFlow backend. Traceback (most recent call last): File "/home/zhangzhiyang/PycharmProjects/tensorflow1/test_keras.py", line 2, in <module> import keras File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/__init__.py", line 3, in <module> from . import utils File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/utils/__init__.py", line 26, in <module> from .multi_gpu_utils import multi_gpu_model File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/utils/multi_gpu_utils.py", line 7, in <module> from ..layers.merge import concatenate File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/layers/__init__.py", line 4, in <module> from ..engine.base_layer import Layer File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/engine/__init__.py", line 7, in <module> from .network import get_source_inputs File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/keras/engine/network.py", line 9, in <module> import yaml File "/home/zhangzhiyang/anaconda3/envs/tensorflow/lib/python3.6/site-packages/yaml/__init__.py", line 2, in <module> from error import * ModuleNotFoundError: No module named 'error' 我的版本：tensorflow1.8.0,cuda9.0,cuDNN7,anaconda3,python3.6.5 我的tensorflow和keras安装路径均为anaconda3/envs/tensorflow/lib/python3.6/site-packages 我的.bashrc文件如下： export PATH="/home/zhangzhiyang/anaconda3/bin:\$PATH" export LD_LIBRARY_PATH="/home/zhangzhiyang/newdisk/cuda-9.0/lib64:\$LD_LIBRARY_PATH" export PATH="/home/zhangzhiyang/newdisk/cuda-9.0/bin:\$PATH" export CUDA_HOME=\$CUDA_HOME:"/home/zhangzhiyang/newdisk/cuda-9.0" 个人推测可能是python版本的问题，但不知如何解决，我第一次pip Keras未指定安装路径，结果keras安装在了python2.7下，这次我指定了路径为python3.6/site_packages,但是报了如上错误，是否keras不支持python3? 求大神解答！

keras使用报出OMP问题

Keras能否实现GRNN模型，如果可以那程序是怎么样的？

ImageDataGenerator默认的flow_from_directory函数中有个color_mode设置，我看文献中只支持‘gray'和'rgb'，但我现在要处理的图像是RGBD的4通道图像，如何设置呢？求大师指点。 我尝试着将color_mode设置为'rgb'，但是在第一层卷积层的输入数据类型，设置的是(width,height,4)的四通道格式，运行的时候出错了，提示如果我的color_mode设置成了‘rgb'，那么自动生成batch的时候，依旧是会变为3通道格式。具体如下： 在flow_from_directory中的color为‘rgb' ``` train_generator = train_datagen.flow_from_directory( directory= train_dir, # this is the target directory target_size=(200, 200), # all images will be resized to 200x200 classes= potato_class, batch_size=60, color_mode= 'rgb', class_mode='sparse') ``` 在卷基层的输入input_shape中设置为4通道 ``` model = Sequential() # CNN构建 model.add(Convolution2D( input_shape=(200, 200, 4), # input_shape=(1, Width, Height), filters=16, kernel_size=3, strides=1, padding='same', data_format='channels_last', name='CONV_1' )) ``` 运行后的错误提示如下： ValueError: Error when checking input: expected CONV_1_input to have shape (None, 200, 200, 4) but got array with shape (60, 200, 200, 3) 怎样才能让keras接受4通道图像呢？我在stackOverflow中看到有人留言说4通道是支持的，但是我没有找到代码。

Java校招入职华为，半年后我跑路了

Java基础知识面试题（2020最新版）

@程序员：GitHub这个项目快薅羊毛

C++(继承):19---虚基类与虚继承（virtual）

loonggg读完需要3分钟速读仅需 1 分钟大家好，我是你们的校长。我之前讲过，这年头，只要肯动脑，肯行动，程序员凭借自己的技术，赚钱的方式还是有很多种的。仅仅靠在公司出卖自己的劳动时...

win10暴力查看wifi密码

MySQL数据库面试题（2020最新版）

大部分程序员只会写3年代码

2020阿里全球数学大赛：3万名高手、4道题、2天2夜未交卷