#define _CRT_SECURE_NO_WARNINGS   // must precede CRT headers to take effect
#define _USE_MATH_DEFINES

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fstream>
#include <iostream>
//#include "user.h"

using namespace std;

#define OUT_COUT 8 //输出向量维数
#define IN_COUT 72 //输入向量维数
#define COUT 792 //样本数量
#define NN 99 //单个样本数量

typedef struct {                    // BP artificial neural network
    int h;                          // number of hidden-layer nodes actually used (<= IN_COUT/3)
    //double v[IN_COUT][IN_COUT/3]; // (old layout, kept for reference)
    double v[IN_COUT/3][IN_COUT];   // hidden-layer weights: v[hidden node][input]
    //double w[IN_COUT/3][OUT_COUT]; // (old layout, kept for reference)
    double w[OUT_COUT][IN_COUT/3];  // output-layer weights: w[output node][hidden]

    double b1[IN_COUT/3];           // hidden-layer biases
    double b2[OUT_COUT];            // output-layer biases
    double x[COUT][IN_COUT];        // training inputs, one sample per row
    double y[COUT][OUT_COUT];       // target outputs, one row per sample

    double a;                       // learning rate
    double b;                       // target (stop) error
    int LoopCout;                   // maximum training epochs
} bp_nn;

// Bipolar sigmoid activation: maps the net input into (-1, 1).
// (The plain logistic variant is kept below for reference.)
double fnet(double net) {
    // return 1/(1+exp(-net));   // unipolar logistic, unused
    const double denom = 1.0 + exp(-net);
    return 2.0 / denom - 1.0;
}

/*
 * Initialize the BP network: set the hyper-parameters and fill every
 * weight and bias with a random value in [0, 1].
 * Returns 1 on success.
 * (Stray markdown fences that broke compilation were removed; the
 * interactive scanf_s configuration was already commented out and is
 * replaced by the fixed values below.)
 */
int InitBp(bp_nn *bp) {
    bp->h = 24;          // hidden nodes; must not exceed IN_COUT/3
    bp->a = 0.4;         // learning rate
    bp->b = 0.0001;      // target error
    bp->LoopCout = 2000; // maximum epochs

    int i, j;
    srand((unsigned)time(NULL));

    // Hidden-layer weights and biases.
    for (i = 0; i < bp->h; i++) {
        for (j = 0; j < IN_COUT; j++)
            bp->v[i][j] = rand() / (double)RAND_MAX;
        bp->b1[i] = rand() / (double)RAND_MAX;
    }

    // Output-layer weights and biases.
    for (i = 0; i < OUT_COUT; i++) {
        for (j = 0; j < bp->h; j++)
            bp->w[i][j] = rand() / (double)RAND_MAX;
        bp->b2[i] = rand() / (double)RAND_MAX;
    }

    return 1;
}

/*
 * Train the BP network on samples x with targets y, using per-sample
 * (online) gradient descent.  Stops once the summed squared error over
 * an epoch drops below bp->b, or after bp->LoopCout epochs.
 * Returns 1 on success.
 *
 * Fixes vs. the original:
 *  - stray markdown fences removed (the block did not compile);
 *  - the hidden bias was read as bp->b1[i] where i is the SAMPLE index
 *    (runs to COUT=792, past the 24-entry array) — now b1[k];
 *  - hidden-delta loop ran over OUT_COUT instead of h, and summed
 *    w[j][k]*ChgO[k] over k<h (wrong axis) — now w[k][j]*ChgO[k] over
 *    the OUT_COUT output nodes;
 *  - the weight updates had transposed indices (a*O1[j]*ChgO[k] and
 *    a*x[i][j]*ChgH[k]) — now a*O1[k]*ChgO[j] and a*x[i][k]*ChgH[j];
 *  - the derivative now matches the bipolar activation,
 *    f'(net) = (1 - f^2)/2, instead of the logistic form f(1-f);
 *  - biases are trained instead of staying at their random values.
 */
int TrainBp(bp_nn *bp, double x[COUT][IN_COUT], double y[COUT][OUT_COUT]) {
    double f = bp->b;              // target error
    double a = bp->a;              // learning rate
    int h = bp->h;                 // hidden node count (<= IN_COUT/3)
    double v[IN_COUT/3][IN_COUT], w[OUT_COUT][IN_COUT/3]; // local weight copies
    double b1[IN_COUT/3], b2[OUT_COUT];                   // local bias copies
    double ChgH[IN_COUT/3], ChgO[OUT_COUT];               // hidden/output deltas
    double O1[IN_COUT/3], O2[OUT_COUT];                   // hidden/output activations
    int LoopCout = bp->LoopCout;   // maximum epochs
    int i, j, k, n;
    double temp;

    // Work on local copies; results are written back after training.
    for (i = 0; i < h; i++) {
        for (j = 0; j < IN_COUT; j++)
            v[i][j] = bp->v[i][j];
        b1[i] = bp->b1[i];
    }
    for (i = 0; i < OUT_COUT; i++) {
        for (j = 0; j < h; j++)
            w[i][j] = bp->w[i][j];
        b2[i] = bp->b2[i];
    }

    double e = f + 1;              // force at least one epoch
    for (n = 0; e > f && n < LoopCout; n++) {
        e = 0;
        for (i = 0; i < COUT; i++) {          // online update, one sample at a time
            // Forward pass: hidden layer.
            for (k = 0; k < h; k++) {
                temp = 0;
                for (j = 0; j < IN_COUT; j++)
                    temp += x[i][j] * v[k][j];
                O1[k] = fnet(temp + b1[k]);
            }
            // Forward pass: output layer.
            for (k = 0; k < OUT_COUT; k++) {
                temp = 0;
                for (j = 0; j < h; j++)
                    temp += O1[j] * w[k][j];
                O2[k] = fnet(temp + b2[k]);
            }

            // Output deltas and accumulated squared error.
            for (j = 0; j < OUT_COUT; j++) {
                ChgO[j] = 0.5 * (1 - O2[j] * O2[j]) * (y[i][j] - O2[j]);
                e += (y[i][j] - O2[j]) * (y[i][j] - O2[j]);
            }

            // Hidden deltas: back-propagate the output deltas through w.
            for (j = 0; j < h; j++) {
                temp = 0;
                for (k = 0; k < OUT_COUT; k++)
                    temp += w[k][j] * ChgO[k];
                ChgH[j] = temp * 0.5 * (1 - O1[j] * O1[j]);
            }

            // Gradient step: output layer weights and biases.
            for (j = 0; j < OUT_COUT; j++) {
                for (k = 0; k < h; k++)
                    w[j][k] += a * O1[k] * ChgO[j];
                b2[j] += a * ChgO[j];
            }
            // Gradient step: hidden layer weights and biases.
            for (j = 0; j < h; j++) {
                for (k = 0; k < IN_COUT; k++)
                    v[j][k] += a * x[i][k] * ChgH[j];
                b1[j] += a * ChgH[j];
            }
        }
        if (n % 10 == 0)
            printf("误差 : %f\n", e);
    }

    printf("总共循环次数：%d\n", n);
    printf("调整后的隐层权矩阵：\n");
    for (i = 0; i < h; i++) {
        for (j = 0; j < IN_COUT; j++)
            printf("%f    ", v[i][j]);
        printf("\n");
    }
    printf("调整后的输出层权矩阵：\n");
    for (i = 0; i < OUT_COUT; i++) {
        for (j = 0; j < h; j++)
            printf("%f    ", w[i][j]);
        printf("\n");
    }

    // Copy the trained parameters back into the network.
    for (i = 0; i < h; i++) {
        for (j = 0; j < IN_COUT; j++)
            bp->v[i][j] = v[i][j];
        bp->b1[i] = b1[i];
    }
    for (i = 0; i < OUT_COUT; i++) {
        for (j = 0; j < h; j++)
            bp->w[i][j] = w[i][j];
        bp->b2[i] = b2[i];
    }
    printf("bp网络训练结束！\n");

    return 1;
}

/*
 * Interactive forward pass: repeatedly read IN_COUT feature values from
 * stdin and print the network's OUT_COUT outputs.  Loops until the
 * program is interrupted, so the trailing return is unreachable.
 *
 * Fixes vs. the original:
 *  - weight indexing was transposed (v[j][i], w[j][i]) relative to the
 *    [node][input] layout used everywhere else — now v[i][j], w[i][j];
 *  - biases were SUBTRACTED here but added during training — now added;
 *  - the prompt claimed "3" inputs while reading IN_COUT of them.
 */
int UseBp(bp_nn *bp) {
    float Input[IN_COUT];
    double O1[IN_COUT/3];   // hidden-layer outputs (h <= IN_COUT/3)
    double O2[OUT_COUT];    // output-layer outputs
    while (1) {
        printf("请输入%d个数：\n", IN_COUT);
        int i, j;
        for (i = 0; i < IN_COUT; i++)
            scanf_s("%f", &Input[i]);
        double temp;
        // Hidden layer.
        for (i = 0; i < bp->h; i++) {
            temp = 0;
            for (j = 0; j < IN_COUT; j++)
                temp += Input[j] * bp->v[i][j];
            O1[i] = fnet(temp + bp->b1[i]);
        }
        // Output layer.
        for (i = 0; i < OUT_COUT; i++) {
            temp = 0;
            for (j = 0; j < bp->h; j++)
                temp += O1[j] * bp->w[i][j];
            O2[i] = fnet(temp + bp->b2[i]);
        }
        printf("结果： ");
        for (i = 0; i < OUT_COUT; i++)
            printf("%.3f ", O2[i]);
        printf("\n");
    }
    return 1;
}

{
//bp_nn bp1;
ifstream fileinput1;
ifstream fileinput2;
ifstream fileinput3;
ifstream fileinput4;
ifstream fileinput5;
ifstream fileinput6;
ifstream fileinput7;
ifstream fileinput8;

``````fileinput1.open("emgclose.txt");
fileinput2.open("emgopen.txt");
fileinput3.open("emgext.txt");
fileinput4.open("emgfle.txt");
fileinput5.open("emgsph.txt");
fileinput6.open("emgcyl.txt");
fileinput7.open("emgtri.txt");
fileinput8.open("emgkey.txt");
for(int m = 0;m< NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput1 >> x[m][i];
}
}

for(int m = NN;m<2*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput2 >> x[m][i];
}
}

for(int m = 2*NN;m<3*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput3 >> x[m][i];
}
}

for(int m = 3*NN;m<4*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput4 >> x[m][i];
}
}

for(int m = 4*NN;m<5*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput5 >> x[m][i];
}
}

for(int m = 5*NN;m<6*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput6 >> x[m][i];
}
}

for(int m = 6*NN;m<7*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput7 >> x[m][i];
}
}

for(int m = 7*NN;m<8*NN;++m)
{
for(int i =0;i < IN_COUT;++i)
{
fileinput8 >> x[m][i];
}
}

fileinput1.close();
fileinput2.close();
fileinput3.close();
fileinput4.close();
fileinput5.close();
fileinput6.close();
fileinput7.close();
fileinput8.close();

ifstream fileinput;
fileinput.open("teach.txt");

for (int m2 = 0; m2 < OUT_COUT; m2++)
{
for (int i = 0; i < OUT_COUT; i++)
{
fileinput>>y[m2][i];
}
for (int j = m2*NN; j < (m2+1)*NN; j++)
{
for (int k = 0; k < OUT_COUT; k++)
{
y[j][k] = y[m2][k];
}
}

}
for (int i = 0; i < NN; i++)
{
for (int j = 0; j < OUT_COUT; j++)
{
y[i][j] = y[0][j];
}
}

fileinput.close();

double Mininput[IN_COUT] = {0.0};
double Maxinput[IN_COUT] = {0.0};
//找出训练的数据相应的最大值、最小值，为归一化做准备
for (int i = 0; i < IN_COUT; i++)
{
Mininput[i] = Maxinput[i] = x[0][i];
for (int j = 0; j < COUT; j++)
{
Mininput[i] = Mininput[i] < x[j][i]?Mininput[i]:x[j][i];
Maxinput[i] = Maxinput[i] > x[j][i]?Maxinput[i]:x[j][i];
}
}

//归一化处理
for (int i = 0; i < OUT_COUT; i++)
{
for (int j = 0; j < COUT; j++)
{
y[j][i] = 2.0*(y[j][i] - 0.0)/(1.0 - 0.0)-1.0;
}
}

for (int i = 0; i < IN_COUT; i++)
{
for (int j = 0; j < COUT; j++)
{
//X[i][j] = (X1[i][j] - Mininput[i]+1)/(Maxinput[i] - Mininput[i]+1);
x[j][i] = 2.0*(x[j][i] - Mininput[i])/(Maxinput[i] - Mininput[i])-1.0;
}
}
``````

}

int main()
{
/* float x[COUT][IN_COUT] = {{0.8,0.5,0},
{0.9,0.7,0.3},
{1,0.8,0.5},
{0,0.2,0.3},
{0.2,0.1,1.3},
{0.2,0.7,0.8}}; //训练样本
int y[COUT][OUT_COUT] = {{0,1},
{0,1},
{0,1},
{1,0},
{1,0},
{1,0}}; */ //理想输出

``````bp_nn bp;

InitBp(&bp);                    //初始化bp网络结构
TrainBp(&bp, bp.x, bp.y);             //训练bp神经网络
``````

// UseBp(&bp); //测试bp神经网络

``````return 1;
``````

}

1个回答

http://www.hankcs.com/ml/back-propagation-neural-network.html

【C语言实现】遗传算法改进BP神经网络原理和算法实现怎么弄？

![图片说明](https://img-ask.csdn.net/upload/201912/26/1577352928_112062.jpg) 上图： 1.蓝色部分表示一类数据，红色部分表示二类数据 2.数据都只有一个特征向量为y轴的值 3.x轴为数据在训练集中的下标 问题： 本来应该是很简单的，利用一个特征向量（y轴的值）作为分类依据，将红色数据和蓝色数据分开。 然而，利用matlab建立BP神经网络，训练结果有时候非常好，测试正确率能达到98以上，有时候正确率只有20~30。 ``` %训练样本 train_sample=[trainECG']; %21*25 %测试样本 test_sample=[testECG']; %输出类别，建立HotCode t1=[trainLabel']; t2=[testLabel']; train_result = ind2vec(t1); test_result = ind2vec(t2); net = newff(train_sample,train_result,4,{ 'tansig' 'purelin' } ,'traingdx'); net.trainParam.show=50; % 显示训练迭代过程 net.trainParam.epochs=15000; % 最大训练磁数 net.trainParam.goal=0.001; % 要求训练精度 net.trainParam.lr=0.02; % 学习率 net=init(net); %网络初始化 [net,tr]=train(net,train_sample,train_result); % 网络训练 result_sim=sim(net,test_sample); % 利用得到的神经网络仿真 result_sim_ind=vec2ind(result_sim); correct=0; for i=1:length(result_sim_ind) if result_sim_ind(i)==t2(i); correct=correct+1; end end disp('正确率：'); correct / length(result_sim_ind) ```

bp神经网络的参数问题

BP神经网络预测效果不好是什么原因？
matlab中的bp神经网络参数问题
bp神经网络有哪些参数，有多少参数；每个参数代表什么意义；运用的时候，参数的多少会有什么影响，还是最好的拟合结果就好
MATLAB智能算法第29个案例报错
%% 清空环境变量 clear all clc %% 导入数据 load concrete_data.mat % 随机产生训练集和测试集 n = randperm(size(attributes,2)); % 训练集——80个样本 p_train = attributes(:,n(1:80))'; t_train = strength(:,n(1:80))'; % 测试集——23个样本 p_test = attributes(:,n(81:end))'; t_test = strength(:,n(81:end))'; %% 数据归一化 % 训练集 [pn_train,inputps] = mapminmax(p_train'); pn_train = pn_train'; pn_test = mapminmax('apply',p_test',inputps); pn_test = pn_test'; % 测试集 [tn_train,outputps] = mapminmax(t_train'); tn_train = tn_train'; tn_test = mapminmax('apply',t_test',outputps); tn_test = tn_test'; %% SVM模型创建/训练 % 寻找最佳c参数/g参数 [c,g] = meshgrid(-10:0.5:10,-10:0.5:10); [m,n] = size(c); cg = zeros(m,n); eps = 10^(-4); v = 5; bestc = 0; bestg = 0; error = Inf; for i = 1:m for j = 1:n cmd = ['-v ',num2str(v),' -t 2',' -c ',num2str(2^c(i,j)),' -g ',num2str(2^g(i,j) ),' -s 3 -p 0.1']; cg(i,j) = svmtrain(tn_train,pn_train,cmd); if cg(i,j) < error error = cg(i,j); bestc = 2^c(i,j); bestg = 2^g(i,j); end if abs(cg(i,j) - error) <= eps && bestc > 2^c(i,j) error = cg(i,j); bestc = 2^c(i,j); bestg = 2^g(i,j); end end end % 创建/训练SVM cmd = [' -t 2',' -c ',num2str(bestc),' -g ',num2str(bestg),' -s 3 -p 0.01']; model = svmtrain(tn_train,pn_train,cmd); %% SVM仿真预测 [Predict_1,error_1] = svmpredict(tn_train,pn_train,model); [Predict_2,error_2] = svmpredict(tn_test,pn_test,model); % 反归一化 predict_1 = mapminmax('reverse',Predict_1,outputps); predict_2 = mapminmax('reverse',Predict_2,outputps); % 结果对比 result_1 = [t_train predict_1]; result_2 = [t_test predict_2]; %% 绘图 figure(1) plot(1:length(t_train),t_train,'r-*',1:length(t_train),predict_1,'b:o') grid on legend('真实值','预测值') xlabel('样本编号') ylabel('耐压强度') string_1 = {'训练集预测结果对比'; ['mse = ' num2str(error_1(2)) ' R^2 = ' num2str(error_1(3))]}; title(string_1) figure(2) plot(1:length(t_test),t_test,'r-*',1:length(t_test),predict_2,'b:o') grid on legend('真实值','预测值') xlabel('样本编号') ylabel('耐压强度') string_2 = {'测试集预测结果对比'; ['mse = ' num2str(error_2(2)) ' R^2 = ' num2str(error_2(3))]}; title(string_2) %% 
BP 神经网络 % 数据转置 pn_train = pn_train'; tn_train = tn_train'; pn_test = pn_test'; tn_test = tn_test'; % 创建BP神经网络 net = newff(pn_train,tn_train,10); % 设置训练参数 net.trainParam.epcohs = 1000; net.trainParam.goal = 1e-3; net.trainParam.show = 10; net.trainParam.lr = 0.1; % 训练网络 net = train(net,pn_train,tn_train); % 仿真测试 tn_sim = sim(net,pn_test); % 均方误差 E = mse(tn_sim - tn_test); % 决定系数 N = size(t_test,1); R2=(N*sum(tn_sim.*tn_test)-sum(tn_sim)*sum(tn_test))^2/((N*sum((tn_sim).^2)-(sum(tn_sim))^2)*(N*sum((tn_test).^2)-(sum(tn_test))^2)); % 反归一化 t_sim = mapminmax('reverse',tn_sim,outputps); % 绘图 figure(3) plot(1:length(t_test),t_test,'r-*',1:length(t_test),t_sim,'b:o') grid on legend('真实值','预测值') xlabel('样本编号') ylabel('耐压强度') string_3 = {'测试集预测结果对比(BP神经网络)'; ['mse = ' num2str(E) ' R^2 = ' num2str(R2)]}; title(string_3) 错误使用svmtrain（line233） Y must be a vector or a character array. 出错 main(line 48) cg (i,j)=svmtrain (tr_train,pn_train,cmd);我源码没改过，是不是因为版本的不同？我用的是2014a

Matlab仿真成像出来的图不聚焦，代码里面改变目标位置的横坐标后在出来的图像中横坐标还是没有改变

Perfect Pth Powers 正确实现的方式
Description We say that x is a perfect square if, for some integer b, x = b2. Similarly, x is a perfect cube if, for some integer b, x = b3. More generally, x is a perfect pth power if, for some integer b, x = bp. Given an integer x you are to determine the largest p such that x is a perfect pth power. Input Each test case is given by a line of input containing x. The value of x will have magnitude at least 2 and be within the range of a (32-bit) int in C, C++, and Java. A line containing 0 follows the last test case. Output For each test case, output a line giving the largest integer p such that x is a perfect pth power. Sample Input 17 1073741824 25 0 Sample Output 1 30 2

bp神经网络的c语言实例求解～

tensorflow实现BP算法遇到了问题，求大神指点！！！
import tensorflow as tf import numpy as np #from tensorflow.examples.tutorials.mnist import input_data #载入数据集 #mnist = input_data.read_data_sets("MNIST_data",one_hot=True) #每个批次的大小 #batch_size = 100 #????????????????????????????????? #计算一共有多少个批次 #n_batch = mnist.train.num_examples // batch_size #定义placeholder x_data=np.mat([[0.4984,0.5102,0.5213,0.5340], [0.5102,0.5213,0.5340,0.5407], [0.5213,0.5340,0.5407,0.5428], [0.5340,0.5407,0.5428,0.5530], [0.5407,0.5428,0.5530,0.5632], [0.5428,0.5530,0.5632,0.5739], [0.5530,0.5632,0.5739,0.5821], [0.5632,0.5739,0.5821,0.5920], [0.5739,0.5821,0.5920,0.5987], [0.5821,0.5920,0.5987,0.6043], [0.5920,0.5987,0.6043,0.6095], [0.5987,0.6043,0.6095,0.6161], [0.6043,0.6095,0.6161,0.6251], [0.6095,0.6161,0.6251,0.6318], [0.6161,0.6251,0.6318,0.6387], [0.6251,0.6318,0.6387,0.6462], [0.6318,0.6387,0.6462,0.6518], [0.6387,0.6462,0.6518,0.6589], [0.6462,0.6518,0.6589,0.6674], [0.6518,0.6589,0.6674,0.6786], [0.6589,0.6674,0.6786,0.6892], [0.6674,0.6786,0.6892,0.6988]]) y_data=np.mat([[0.5407], [0.5428], [0.5530], [0.5632], [0.5739], [0.5821], [0.5920], [0.5987], [0.6043], [0.6095], [0.6161], [0.6251], [0.6318], [0.6387], [0.6462], [0.6518], [0.6589], [0.6674], [0.6786], [0.6892], [0.6988], [0.7072]]) xs = tf.placeholder(tf.float32,[None,4]) # 样本数未知，特征数为1，占位符最后要以字典形式在运行中填入 ys = tf.placeholder(tf.float32,[None,1]) #创建一个简单的神经网络 W1 = tf.Variable(tf.truncated_normal([4,10],stddev=0.1)) b1 = tf.Variable(tf.zeros([10])+0.1) L1 = tf.nn.tanh(tf.matmul(x,W1)+b1) W2 = tf.Variable(tf.truncated_normal([10,1],stddev=0.1)) b2 = tf.Variable(tf.zeros([1])+0.1) L2 = tf.nn.softmax(tf.matmul(L1,W2)+b2) #二次代价函数 #loss = tf.reduce_mean(tf.square(y-prediction)) #loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys,logits=prediction)) loss = tf.reduce_mean(tf.reduce_sum(tf.square((ys-L2)),reduction_indices = [1]))#需要向相加索引号，redeuc执行跨纬度操作 #使用梯度下降法 #train_step = tf.train.GradientDescentOptimizer(0.1).mnimize(loss) train_step = 
tf.train.GradientDescentOptimizer(0.1).minimize(loss) #train = tf.train.GradientDescentOptimizer(0.1).minimize(loss) # 选择梯度下降法 #初始化变量 #init = tf.global_variables_initializer() init = tf.initialize_all_variables() #结果存放在一个布尔型列表中 #correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1)) #求准确率 #accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32)) with tf.Session() as sess: sess.run(init) for epoch in range(21): for i in range(22): #batch_xs,batch_ys = mnist.train.next_batch(batch_size) #????????????????????????? sess.run(train_step,feed_dict={xs:x_data,ys:y_data}) #test_acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels,keep_prob:1.0}) #train_acc = sess.run(accuracy,feed_dict={x:mnist.train.images,y:mnist.train.labels,keep_prob:1.0}) print (sess.run(prediction,feed_dict={xs:x_data,ys:y_data})) 提示：WARNING:tensorflow:From <ipython-input-10-578836c021a3>:89 in <module>.: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02. Instructions for updating: Use `tf.global_variables_initializer` instead. --------------------------------------------------------------------------- InvalidArgumentError Traceback (most recent call last) C:\Users\Administrator\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args) 1020 try: -> 1021 return fn(*args) 1022 except errors.OpError as e: C:\Users\Administrator\Anaconda3\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata) 1002 feed_dict, fetch_list, target_list, -> 1003 status, run_metadata) 1004 。。。

Java学习的正确打开方式

linux系列之常用运维命令整理笔录

Python十大装B语法
Python 是一种代表简单思想的语言，其语法相对简单，很容易上手。不过，如果就此小视 Python 语法的精妙和深邃，那就大错特错了。本文精心筛选了最能展现 Python 语法之精妙的十个知识点，并附上详细的实例代码。如能在实战中融会贯通、灵活使用，必将使代码更为精炼、高效，同时也会极大提升代码B格，使之看上去更老练，读起来更优雅。 1. for - else 什么？不是 if 和 else 才

2019年11月中国大陆编程语言排行榜
2019年11月2日，我统计了某招聘网站，获得有效程序员招聘数据9万条。针对招聘信息，提取编程语言关键字，并统计如下： 编程语言比例 rank pl_ percentage 1 java 33.62% 2 c/c++ 16.42% 3 c_sharp 12.82% 4 javascript 12.31% 5 python 7.93% 6 go 7.25% 7

SQL-小白最佳入门sql查询一

【图解经典算法题】如何用一行代码解决约瑟夫环问题

“狗屁不通文章生成器”登顶GitHub热榜，分分钟写出万字形式主义大作

IT界知名的程序员曾说：对于那些月薪三万以下，自称IT工程师的码农们，其实我们从来没有把他们归为我们IT工程师的队伍。他们虽然总是以IT工程师自居，但只是他们一厢情愿罢了。 此话一出，不知激起了多少(码农)程序员的愤怒，却又无可奈何，于是码农问程序员。 码农：你知道get和post请求到底有什么区别？ 程序员：你看这篇就知道了。 码农：你月薪三万了？ 程序员：嗯。 码农：你是怎么做到的? 程序员：
《程序人生》系列-这个程序员只用了20行代码就拿了冠军

11月8日，由中国信息通信研究院、中国通信标准化协会、中国互联网协会、可信区块链推进计划联合主办，科技行者协办的2019可信区块链峰会将在北京悠唐皇冠假日酒店开幕。 　　区块链技术被认为是继蒸汽机、电力、互联网之后，下一代颠覆性的核心技术。如果说蒸汽机释放了人类的生产力，电力解决了人类基本的生活需求，互联网彻底改变了信息传递的方式，区块链作为构造信任的技术有重要的价值。 　　1

【技巧总结】位运算装逼指南

8年经验面试官详解 Java 面试秘诀
作者 | 胡书敏 责编 | 刘静 出品 | CSDN（ID：CSDNnews） 本人目前在一家知名外企担任架构师，而且最近八年来，在多家外企和互联网公司担任Java技术面试官，前后累计面试了有两三百位候选人。在本文里，就将结合本人的面试经验，针对Java初学者、Java初级开发和Java开发，给出若干准备简历和准备面试的建议。   Java程序员准备和投递简历的实

1.两种思维方式在求职面试中，经常会考察这种问题：北京有多少量特斯拉汽车？ 某胡同口的煎饼摊一年能卖出多少个煎饼？ 深圳有多少个产品经理？ 一辆公交车里能装下多少个乒乓球？ 一