YUV to RGB video format conversion

#include <cassert>   // assert() is used in DECODE_YUYV_Float below

#define asm __asm    // MSVC inline-assembly keyword (not used in this float-only excerpt)

typedef unsigned char TUInt8; // [0..255]
typedef unsigned long TUInt32;

struct TARGB32 // 32-bit color
{
    TUInt8 b, g, r, a; // a is alpha
};

struct TPicRegion // describes a block of pixel data; convenient for parameter passing
{
    TARGB32* pdata;  // address of the first pixel
    long byte_width; // physical width of one row, in bytes;
                     // abs(byte_width) may be >= width*sizeof(TARGB32)
    long width;      // width in pixels
    long height;     // height in pixels
};
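For example, a tightly packed 640-pixel-wide image would have byte_width = 640 * sizeof(TARGB32) = 2560. The abs() in the comment suggests byte_width may also be negative, presumably so that bottom-up images (rows stored last-to-first in memory) can be addressed without copying.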

// A function to access a single pixel can then be written as:
__forceinline TARGB32& Pixels(const TPicRegion& pic, const long x, const long y)
{
    return ((TARGB32*)((TUInt8*)pic.pdata + pic.byte_width * y))[x];
}

// Color saturation function: clamps a component to [0, 255]
__forceinline long border_color(long color)
{
    if (color > 255)
        return 255;
    else if (color < 0)
        return 0;
    else
        return color;
}

__forceinline TARGB32 YUVToRGB32_float(const TUInt8 Y, const TUInt8 U, const TUInt8 V)
{
    TARGB32 result;
    // The double results are truncated to long by the implicit conversion in border_color().
    result.b = border_color( 1.164383 * (Y - 16) + 2.017232 * (U - 128) );
    result.g = border_color( 1.164383 * (Y - 16) - 0.391762 * (U - 128) - 0.812968 * (V - 128) );
    result.r = border_color( 1.164383 * (Y - 16) + 1.596027 * (V - 128) );
    result.a = 255;
    return result;
}
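For reference: these are the standard ITU-R BT.601 coefficients for video-range ("studio swing") YCbCr, where Y spans [16, 235] and U/V span [16, 240]. The factor 1.164383 is 255/219, which stretches the luma range back to [0, 255]; 1.596027 and 2.017232 are the Cr/Cb gains 1.402 and 1.772 scaled by 255/224, and likewise for the two green coefficients.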

void DECODE_YUYV_Float(const TUInt8* pYUYV, const TPicRegion& DstPic)
{
    // YUYV packs two pixels into 4 bytes (Y0 U Y1 V), so the width must be even.
    assert((DstPic.width & 1) == 0);

    TARGB32* pDstLine = DstPic.pdata;
    for (long y = 0; y < DstPic.height; ++y)
    {
        for (long x = 0; x < DstPic.width; x += 2)
        {
            // Two horizontally adjacent pixels share one U/V pair.
            pDstLine[x + 0] = YUVToRGB32_float(pYUYV[0], pYUYV[1], pYUYV[3]);
            pDstLine[x + 1] = YUVToRGB32_float(pYUYV[2], pYUYV[1], pYUYV[3]);
            pYUYV += 4;
        }
        ((TUInt8*&)pDstLine) += DstPic.byte_width; // advance to the next output row
    }
}

Could some kind expert write a main() function for me so I can see what the actual output looks like?
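For illustration, a minimal test driver might look like the following, assuming it is appended to the code above; the 2x2 image size and the YUYV sample bytes are made-up values chosen only to exercise the decoder:

#include <cstdio>

int main()
{
    // Two rows, each holding one YUYV pair (Y0 U Y1 V = two pixels in 4 bytes).
    const TUInt8 yuyv[2 * 4] = {
        128, 128, 128, 128, // row 0: mid gray (Y=128, U=V=128)
         81,  90,  81, 240, // row 1: roughly pure red in BT.601 terms
    };

    TARGB32 pixels[2 * 2];                // 2x2 destination image
    TPicRegion dst;
    dst.pdata      = pixels;
    dst.byte_width = 2 * sizeof(TARGB32); // tightly packed rows
    dst.width      = 2;
    dst.height     = 2;

    DECODE_YUYV_Float(yuyv, dst);

    for (long y = 0; y < dst.height; ++y)
        for (long x = 0; x < dst.width; ++x)
        {
            const TARGB32& p = Pixels(dst, x, y);
            printf("(%ld,%ld) R=%3d G=%3d B=%3d A=%3d\n",
                   x, y, (int)p.r, (int)p.g, (int)p.b, (int)p.a);
        }
    return 0;
}

With these sample values, the first row should print about R=130 G=130 B=130 (since 1.164383 * (128 - 16) ≈ 130.4), and the second row should print R=254 G=0 B=0, i.e. nearly pure red, with A=255 everywhere.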

其他相关推荐
海康摄像头SDK实现YUV到RGB的转换实现基于OpenCV的实现
海康SDK的回调格式是YUV 我需要OpenCV转到RGB可用于解析二维码
由YUV转换成RGB后,得到了首地址pOut,和长度length,请问怎么在OPENCV 中显示呢?
由YUV转换成RGB后,得到了数据的首地址pOut,和长度length,请问怎么在OPENCV 中显示出图片呢?
opencv cvcvtcolor函数 将RGB转为YUV
![YUV](https://img-ask.csdn.net/upload/201504/13/1428924331_530409.png) ![RGB](https://img-ask.csdn.net/upload/201504/13/1428924318_594405.png) 转换后图片怎么变成这样了
关于RGB,YUV互换,颜色失真的问题
我想要实现的功能是将RGB转为YUV,YUV输出的大小要为8位整数,我通过下面图片的代码进行转换然后还原,发现图像有失真,不知什么原因呢??求大神指点 ![图片说明](https://img-ask.csdn.net/upload/201503/21/1426930463_889285.jpg)
安卓手机实时视频传输
我要写一篇论文,不知道思路对不对,麻烦各位牛人给看看。 我是要做一个APP,我的思路是通过安卓自带摄像头采集视频并预览,回调函数获取实时视频帧数据,通过wifi环境下的Socket UDP传输采集到的原始YUV420sp数据到另一台手机上去,另一台手机上将获得的YUV420sp数据转换成RGB格式的,然后用SurfaceView控件显示出来。 就整个思路有没有逻辑问题?有没有技术难题?比如,YUV能通过socket传输吗?YUV转RGB后,SurfaceView能显示出来吗? 这对我很重要。希望大家给个答案。谢谢了。跪谢。
android如何直接将解码出来的yuv显示
android如何直接将解码出来的yuv显示,目前我在项目中是将yuv转换成rgb565来显示,效率太低了。。。希望高手能给一个可行的方案。。。。
怎样用c语言把jpeg转换成rgb24
在做视频采集和压缩,哪位大神帮帮忙,把jpeg直接转成yuv的也行
rgb转byte数组应该怎么处理
我在看YUV420SP转RGB的时候有如下一段java代码 if (r < 0) r = 0; else if (r > 262143) r = 262143; if (g < 0) g = 0; else if (g > 262143) g = 262143; if (b < 0) b = 0; else if (b > 262143) b = 262143; rgbBuf[yp * 3] = (byte)(r >> 10); rgbBuf[yp * 3 + 1] = (byte)(g >> 10); rgbBuf[yp * 3 + 2] = (byte)(b >> 10); 将rgb存放到一个byte[]数组中,但是这样转换的颜色有问题,人物是蓝色的,我从别的地方也看到相关颜色转换算法 int rgbTmp = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff); 第二种转换换后的是int型数据,但是我需要的是3个自己的byte数组,而且好像转换算法也不同,不知道那种是正确的
在arm上利用Qt采集视频图像,能够保存采集的视频图像
我使用的是usb免驱摄像头,现在已经实现实时显示图像,并能够拍照保存,但无法实现保存视频。 以下是本程序的相关代码 # main.cpp ``` #include "camera.h" #include <QApplication> int main(int argc, char *argv[]) { QApplication a(argc, argv); camera w; w.show(); return a.exec(); } ``` # camera.cpp ``` #include "camera.h" #include "ui_camera.h" #include <QDate> #include <QTime> void yuv422to420p(char *yuv422buf, char *yuv420pbuf, int width, int height) { char *src, *dest, *dest2; int i, j; src = yuv422buf; dest = yuv420pbuf; for (i = 0; i < width * height * 2; i++) { if (i % 2 != 0) { continue; } *dest++ = *(src + i); } src = yuv422buf; dest = yuv420pbuf + width * height; dest2 = dest + (width * height) / 4; for (i = 0; i < height; i += 2) { for (j = 1; j < width * 2; j += 4) { *dest++ = *(src + i * width * 2 + j); *dest2++ = *(src + i * width * 2 + j + 2); } } } /* yuv格式转换为rgb格式的算法处理函数 */ int convert_yuv_to_rgb_pixel(int y, int u, int v) { unsigned int pixel32 = 0; unsigned char *pixel = (unsigned char *)&pixel32; int r, g, b; r = y + (1.370705 * (v-128)); g = y - (0.698001 * (v-128)) - (0.337633 * (u-128)); b = y + (1.732446 * (u-128)); if(r > 255) r = 255; if(g > 255) g = 255; if(b > 255) b = 255; if(r < 0) r = 0; if(g < 0) g = 0; if(b < 0) b = 0; pixel[0] = r ; pixel[1] = g ; pixel[2] = b ; return pixel32; } /* yuv格式转换为rgb格式 */ int convert_yuv_to_rgb_buffer(unsigned char *yuv, unsigned char *rgb, unsigned int width, unsigned int height) { unsigned int in, out = 0; unsigned int pixel_16; unsigned char pixel_24[3]; unsigned int pixel32; int y0, u, y1, v; for(in = 0; in < width * height * 2; in += 4) { pixel_16 = yuv[in + 3] << 24 | yuv[in + 2] << 16 | yuv[in + 1] << 8 | yuv[in + 0]; y0 = (pixel_16 & 0x000000ff); u = (pixel_16 & 0x0000ff00) >> 8; y1 = (pixel_16 & 0x00ff0000) >> 16; v = (pixel_16 & 0xff000000) >> 24; pixel32 = convert_yuv_to_rgb_pixel(y0, u, v); pixel_24[0] = (pixel32 & 0x000000ff); pixel_24[1] = (pixel32 & 0x0000ff00) >> 8; pixel_24[2] = (pixel32 & 0x00ff0000) >> 16; rgb[out++] = pixel_24[0]; rgb[out++] = pixel_24[1]; rgb[out++] = pixel_24[2]; pixel32 = convert_yuv_to_rgb_pixel(y1, u, v); pixel_24[0] = (pixel32 & 0x000000ff); pixel_24[1] = (pixel32 & 0x0000ff00) >> 8; pixel_24[2] = (pixel32 & 0x00ff0000) >> 16; rgb[out++] = pixel_24[0]; rgb[out++] = pixel_24[1]; rgb[out++] = pixel_24[2]; } return 0; } int camera::camera_init() { int ret=0,i=0,count=0; struct v4l2_capability cap; //视频设备的功能,对应命令VIDIOC_QUERYCAP struct v4l2_fmtdesc fmtdesc; //视频格式描述符类型 struct v4l2_format format; //帧的格式,如宽度,高度等,对应命令VIDIOC_G_FMT、VIDIOC_S_FMT等 struct v4l2_requestbuffers reqbuf; //向驱动申请帧缓冲请求,包含申请的个数,对应命令VIDIOC_REQBUFS struct v4l2_buffer buf; //驱动中的一帧图像缓存,对应命令VIDIOC_QUERYBUF fmtdesc.index = 0; fmtdesc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; //传输流类型 ret = ::ioctl(fd, VIDIOC_G_FMT, &format); //'VIDIOC_G_FMT'——读取当前驱动的视频捕获格式 if(ret < 0){ perror("VIDIOC_G_FMT"); exit(1); } printf("width:%d\n", format.fmt.pix.width); printf("height:%d\n", format.fmt.pix.height); printf("pixelformat:%x\n", format.fmt.pix.pixelformat); printf("field:%x\n", format.fmt.pix.field); printf("bytesperline:%d\n", format.fmt.pix.bytesperline); printf("sizeimage:%d\n", format.fmt.pix.sizeimage); printf("colorspace:%d\n", format.fmt.pix.colorspace); format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; format.fmt.pix.width = 640; format.fmt.pix.height = 480; format.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; ret = ::ioctl(fd, VIDIOC_S_FMT, &format); if(ret < 0){ fprintf(stderr, "Not support jepg"); perror("VIDIOC_S_FMT"); exit(1); } reqbuf.count = 3; reqbuf.type = 
V4L2_BUF_TYPE_VIDEO_CAPTURE; reqbuf.memory = V4L2_MEMORY_MMAP; ret = ::ioctl(fd, VIDIOC_REQBUFS, &reqbuf); if(ret < 0){ perror("VIDIOC_REQBUFS"); exit(1); } bufinf = (struct bufinfor *)calloc(reqbuf.count, sizeof(struct bufinfor)); if(!bufinf){ perror("calloc"); exit(1); } for(count = 0; count < reqbuf.count; count++){ buf.index = count; buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; ret = ::ioctl(fd, VIDIOC_QUERYBUF, &buf); if(ret < 0){ perror("VIDIOC_REQBUFS"); exit(1); } bufinf[buf.index].length = buf.length; bufinf[buf.index].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, buf.m.offset); if(!(bufinf[buf.index].start)){ perror("mmap"); exit(1); } } for(i = 0; i < reqbuf.count; i++){ buf.index = i; buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; ret = ::ioctl(fd, VIDIOC_QBUF, &buf); if(ret < 0){ perror("VIDIOC_QBUF"); exit(1); } } enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = ::ioctl(fd, VIDIOC_STREAMON, &type); if(ret < 0){ perror("VIDIOC_STREAMON"); exit(1); } return 0; } camera::camera(QWidget *parent) : QMainWindow(parent), ui(new Ui::camera) { char devname[32]; int i=0; int ret; struct v4l2_capability cap; ui->setupUi(this); while(i < 100) { sprintf(devname,"/dev/video%d",i++); fd = ::open(devname,O_RDWR); if(fd < 0) { continue; } ui->comboBox->addItem(QWidget::tr(devname)); ::close(fd); } } camera::~camera() { free(bufinf); ::close(fd); delete ui; } void camera::moveEvent(QMoveEvent *) { this->move(QPoint(0,0)); } void camera::resizeEvent(QResizeEvent *) { this->showMaximized(); } void camera::on_pushButton_2_clicked() { take_photo(); } static bool take = 0; void camera::show_() { int ret; unsigned char *rgb=new unsigned char [640 * 480 *3]; struct v4l2_buffer buf; fd_set readset; FD_ZERO(&readset); FD_SET(fd, &readset); ret = select(fd + 1, &readset, NULL, NULL, NULL); if(ret < 0){ perror("select"); exit(1); } buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; ret = ioctl(fd, VIDIOC_DQBUF, &buf); if(ret < 0){ perror("VIDIOC_DQBUF"); exit(1); } convert_yuv_to_rgb_buffer((unsigned char *)bufinf[buf.index].start,rgb,640,480); ret = ioctl(fd, VIDIOC_QBUF, &buf); if(ret < 0){ perror("VIDIOC_QBUF"); exit(1); } QImage *mage = new QImage(rgb,640,480,QImage::Format_RGB888); if (take == 1) { mage->save(tr("%1.jpg").arg("/mnt/Photo/Photo_2019.04.15/IMG" + QDate::currentDate().toString("yyyyMMdd") + QTime::currentTime().toString("hhmmss")),"JPG"); // mage->save(tr("%1.jpg").arg("/home/root/Photo/IMG" + QDate::currentDate().toString("yyyyMMdd") + QTime::currentTime().toString("hhmmss")),"JPG"); take = 0; } QImage resultimg=mage->scaled(ui->label->size(),Qt::KeepAspectRatio,Qt::SmoothTransformation); ui->label->setPixmap(QPixmap::fromImage(resultimg)); delete mage; delete rgb; } void camera::take_photo() { take = 1; } void camera::on_comboBox_activated(const QString &arg1) { QString text=ui->comboBox->currentText(); QByteArray devtext=text.toLatin1(); char *devname=devtext.data(); int ret; struct v4l2_capability cap; fd = ::open(devname, O_RDWR); if(fd < 0) { perror("open error"); } camera::camera_init(); QTimer *timer; timer=new QTimer(); timer->setInterval(10); connect(timer,SIGNAL(timeout()),this,SLOT(show_())); timer->start(10); camera_flag=1; } void camera::on_pushButton_clicked() { close(); } ``` # camera.h ``` #ifndef CAMERA_H #define CAMERA_H #include <QMainWindow> #include <QTimer> #include <QImage> #include <QPixmap> #include <QDebug> #include <QStringList> #include <QByteArray> 
#include <QComboBox> extern "C"{ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/mman.h> #include <sys/types.h> #include <fcntl.h> #include <sys/ioctl.h> #include <string.h> #include <time.h> #include <sys/select.h> #include <sys/time.h> #include <sys/types.h> #include <unistd.h> #include <jpeglib.h> #include <linux/videodev2.h> } namespace Ui { class camera; } struct bufinfor{ void *start; unsigned int length; }; class camera : public QMainWindow { Q_OBJECT public: explicit camera(QWidget *parent = 0); ~camera(); protected: void moveEvent(QMoveEvent *); void resizeEvent(QResizeEvent *); private slots: int camera_init(void); void on_pushButton_2_clicked(); void show_(); void take_photo(); void on_comboBox_activated(const QString &arg1); void on_pushButton_clicked(); private: Ui::camera *ui; int fd; int camera_flag; struct bufinfor *bufinf; }; #endif // CAMERA_H ``` usbcamera.pro ``` #------------------------------------------------- # # Project created by QtCreator 2019-08-02T09:08:26 # #------------------------------------------------- QT += core gui greaterThan(QT_MAJOR_VERSION, 4): QT += widgets TARGET = usbcamera TEMPLATE = app LIBS += -L. -ljpeg SOURCES += main.cpp\ camera.cpp HEADERS += camera.h FORMS += camera.ui ``` 程序界面如下 ![图片说明](https://img-ask.csdn.net/upload/201908/16/1565962170_711562.jpg) ![图片说明](https://img-ask.csdn.net/upload/201908/16/1565962188_747692.jpg) 求助大神该怎么保存拍摄的视频 非常感谢
Android camera 数据处理
Android手机自带的camera,获取视频画面,如何不转换到RGB图像,直接在YUV格式上进行处理?想得到每帧中心点的像素值。转化的话,拖慢了帧率,
bitmap转YUV420之后的图片和原图有色差
转换算法如下 void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) { final int frameSize = width * height; int yIndex = 0; int uvIndex = frameSize; int a, R, G, B, Y, U, V; int index = 0; for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { R = (argb[index] & 0xff0000) >> 16; G = (argb[index] & 0xff00) >> 8; B = (argb[index] & 0xff) >> 0; // well known RGB to YUV algorithm Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16; U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128; V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128; // NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2 // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other // pixel AND every other scanline. yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y)); if (j % 2 == 0 && index % 2 == 0) { yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V)); yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U)); } index++; } } }
我在网上找了个bmp转YUV的程序进行修改,但读不出数据,不知什么问题,有谁懂的请帮忙看下,谢谢了
#include <stdio.h> #include <stdlib.h> #include "bmp2rgb.h" u_int8_t BMP2RGB(BITMAPFILEHEADER file_header,BITMAPINFOHEADER info_header, FILE* bmpFile, u_int8_t* rgbBuf);//24bit RGB u_int8_t RGB24ToYUV420(int Width,int Height,u_int8_t* rgbBuf,u_int8_t*YuvBuffer); #define max(a,b) (((a)>(b))?(a):(b)) #define min(a,b) (((a) < (b)) ? (a) : (b)) int main(int argc, char** argv) { //设置命令行参数 argv[1]= "boot_logo.bmp"; argv[2]="boot_logo.yuv"; //相当于设置文件名 char* bmpFileName = argv[1]; char* yuvFileName = argv[2]; //打开文件 FILE* bmpFile = fopen(bmpFileName, "rb"); if (bmpFile == NULL) { printf(" Open the BMP file.\n"); exit(1); } else { printf("The BMP file is %s\n", bmpFileName); } FILE* yuvFile = fopen(yuvFileName, "wb"); if (yuvFile == NULL) { printf("Cannot open the YUV file.\n"); exit(1); } else { printf("The YUV file is %s\n", yuvFileName); } //读取BMP文件头,信息头,读取错误时的处理代码 BITMAPFILEHEADER file_header; BITMAPINFOHEADER info_header; if (fread(&file_header, sizeof(BITMAPFILEHEADER), 1, bmpFile) != 1) if (file_header.bfType != 0x4D42) { printf("Not BMP file.\n"); exit(1); } if (fread(&info_header, sizeof(BITMAPINFOHEADER), 1, bmpFile) != 1) { printf("read info header error!"); exit(1); }//结束读取BMP文件头 //读取图像尺寸 int width = info_header.biWidth; int height = info_header.biHeight; //开辟缓冲区 buf u_int8_t* yBuf = (u_int8_t*)malloc(height*width); u_int8_t* uBuf = (u_int8_t*)malloc(height*width / 4); u_int8_t* vBuf = (u_int8_t*)malloc(height*width / 4); u_int8_t* rgbBuf = (u_int8_t*)malloc(height*width * 3); u_int8_t*YuvBuffer =(u_int8_t*)malloc(height*width * 5); if (yBuf == NULL || uBuf == NULL || vBuf == NULL || rgbBuf == NULL || YuvBuffer==NULL) { printf("Not enough memory\n"); exit(1); } //BMP与RGB的转换,得到RGB数据 if (BMP2RGB(file_header, info_header, bmpFile, rgbBuf)) { printf("BMP2RGB error\n"); exit(1); } //RGB与YUV的转换,得到YUV数据 // int flip = 0; /*读取到的图像数据是倒序存放的,flip=0保证了RGB2YUV可以正确地对其转换*/ /* if (RGB2YUV(width, height, rgbBuf, yBuf, uBuf, vBuf, flip)) { printf("RGB2YUV error\n"); exit(1); } //将yuv按顺序写入yuvfile文件 fwrite(yBuf, 1, width * height, yuvFile); fwrite(uBuf, 1, (width * height) / 4, yuvFile); fwrite(vBuf, 1, (width * height) / 4, yuvFile);*/ if( RGB24ToYUV420( width, height, rgbBuf,YuvBuffer)) { printf("RGB24ToYUV420 error\n"); exit(1); } int len=0; len= fwrite(YuvBuffer, 1,sizeof(YuvBuffer), yuvFile); printf("len ==%d byte\n",len); //打印宽高,方便yuv观看程序打开 printf("width is %d", width); printf("\n"); printf("height is %d", height); printf("\n"); //清理内存 free(rgbBuf); free(YuvBuffer); free(yBuf); free(uBuf); free(vBuf); fclose(bmpFile); fclose(yuvFile); return 0; } u_int8_t BMP2RGB(BITMAPFILEHEADER file_header,BITMAPINFOHEADER info_header, FILE* bmpFile, u_int8_t* rgbBuf) { BITMAPFILEHEADER file_h=file_header; BITMAPINFOHEADER info_h=info_header; FILE* pFile =bmpFile; int w=0,h=0; //确定像素的实际点阵数 w = (info_h.biWidth*info_h.biBitCount + 31) / 32 * 4;//w为实际一行的字节数 h = info_h.biHeight;//h为列数 // printf("w==%d,h==%d\n",w,h); //开辟实际字节数量的缓冲区,读数据,一次读取一个字节 u_int8_t* dataBuf = (u_int8_t*)malloc(w*h); /*使用文件头的字节偏移属性bfOffBits 直接把文件指针定位到像素值数据的起始 */ fseek(pFile, file_h.bfOffBits, 0); fread(dataBuf, 1, w*h, pFile); unsigned char* data = dataBuf; u_int8_t* rgb = rgbBuf; //开始写入rgb int i, j; for (j = 0; j < h; j++)//j控制行循环 { for (i = 0; i < w; i += 3)//i控制列循环 { *rgb = data[i + w*j];//B *(rgb + 1) = data[i + w*j + 1];//G *(rgb + 2) = data[i + w*j + 2];//R rgb += 3; } } //释放内存 free(dataBuf); return 0; } /***************************************************************************************************************/ u_int8_t RGB24ToYUV420(int 
Width,int Height,u_int8_t* rgbBuf,u_int8_t*YuvBuffer) { u_int8_t* yuvBuf=YuvBuffer;//YUV空间 int nWidth=Width; int nHeight=Height; /////////////////////下面转换算法是网上查到的 int i, j; u_int8_t*bufY = yuvBuf; u_int8_t*bufU = yuvBuf + nWidth * nHeight; u_int8_t*bufV = bufU + (nWidth* nHeight* 1/4); u_int8_t*Y=bufY; u_int8_t*U=bufU; u_int8_t*V=bufV; u_int8_t*bufRGB; unsigned char y, u, v, r, g, b; if (NULL==rgbBuf) { printf("NULL==rgbBuf\n"); return 1 ; } for (j = 0; j<nHeight;j++) { bufRGB = rgbBuf + nWidth * (nHeight - 1-j) * 3 ; for (i = 0;i<nWidth;i++) { int pos = nWidth * i + j; r= *(bufRGB++); g = *(bufRGB++); b = *(bufRGB++); y =(unsigned char)(( 66 * r + 129 * g + 25 * b + 128) >>8) + 16;//16 v = (unsigned char)((-38 * r - 74 * g + 112 * b + 128) >>8) +128 ; //128 u = (unsigned char)((112 * r - 94 * g - 18 * b + 128) >> 8) + 128 ; *(bufY++)=max(0,min(y, 255 )); if (j%2==0&&i%2 ==0) { if (u>255) { u=255; } if (u<0) { u = 0; } *(bufU++) =u; //存u分量 } else { //存v分量 if (i%2==0) { if (v>255) { v = 255; } if (v<0) { v = 0; } *(bufV++) =v; } } } } return 0; } ``` #include <stdio.h> #include "sys/types.h" #include <stdlib.h> typedef unsigned long DWORD;//32bit typedef unsigned short WORD;//16bit typedef unsigned long LONG; //32bit typedef struct tagBITMAPFILEHEADER { //0x00~0x01,说明文件的类型 WORD bfType; //0x02~0x05,说明文件的大小,用字节B为单位 DWORD bfSize; //0x06~0x07,保留,设置为0 WORD bfReserved1; //0x08~0x09,保留,设置为0 WORD bfReserved2; //0x0a~0x0d,说明从BITMAP_FILE_HEADER结构开始到实际的图像数据之间的字节偏移量 DWORD bfOffBits; } BITMAPFILEHEADER; typedef struct tagBITMAPINFOHEADER { //0x0e~0x11,说明当前结构体所需字节数 DWORD biSize; //0x12~0x15,以像素为单位说明图像的宽度 LONG biWidth; //0x16~0x19,以像素为单位说明图像的高度 LONG biHeight; //0x1a~0x1b,说明位面数,必须为1 WORD biPlanes; //0x1c~0x1d,说明图像的位深度 WORD biBitCount; //0x1e~0x21,说明图像是否压缩及压缩类型 DWORD biCompression; //0x22~0x25,以字节为单位说明图像大小,必须是4的整数倍 DWORD biSizeImage; //0x26~0x29,目标设备的水平分辨率,像素/米 LONG biXPelsPerMeter; //0x2a~0x2d,目标设备的垂直分辨率,像素/米 LONG biYPelsPerMeter; //0x2e~0x31,说明图像实际用到的颜色数,如果为0,则颜色数为2的biBitCount次方 DWORD biClrUsed; //0x32~0x35,说明对图像显示有重要影响的颜色索引的数目,如果是0,表示都重要。 DWORD biClrImportant; } BITMAPINFOHEADER; ``` 运行环境linux,bmp图片24位 运行结果: The BMP file is boot_logo.bmp The YUV file is boot_logo.yuv len ==8 byte width is 185729024 height is 0
最新的 ffmpeg 2.8 其实连yuy2数据包都不能正确转换
说了你也不相信,我用 directx 采集到的数据是从web camera来的 yuy2 数据,我用自己写的转换程序可以成功转换正确的图像rgb24, yuy2 -> rgb24 -> I420 都可以,但是用ffmpeg的sws__scale 怎么也无法正确转换。 sws__scale 的解码过程如下 void scaleYUY2toI420(const void *pSrc, int widthSrc, int heightSrc, void *pDest, int widthDest, int heightDest, bool bQualityImportant/* = false*/) { struct SwsContext *pctx; AVPicture picSrc = { 0 }; AVPicture picDst = { 0 }; enum AVPixelFormat fmtSrc = AV_PIX_FMT_YUYV422; enum AVPixelFormat fmtDst = AV_PIX_FMT_YUV420P; avpicture_fill(&picSrc, (uint8_t*)pSrc, fmtSrc, widthSrc, heightSrc); // avpicture_fill(&picDst, (uint8_t*)pDest, fmtDst, widthDest, heightDest); pctx = sws_getContext(widthSrc, heightSrc, fmtSrc, widthDest, heightDest, fmtDst, bQualityImportant ? SWS_BICUBIC : SWS_POINT, NULL, NULL, NULL); if (pctx == NULL) return; sws_scale(pctx, picSrc.data, picSrc.linesize, 0, heightSrc, picDst.data, picDst.linesize); sws_freeContext(pctx); } 问题在哪里始终不得要领,希望专家们不吝赐教。 为了验证问题,我用 ffmpeg 转换 rgb24 -> I420每问题,是不是ffmpeg不支持 yuy2 packed 包到 i420?但是我看它的像素格式定义, AV_PIX_FMT_YUYV422 排在最前面啊
从camera preview获取数据流问题求教!
本人android开发新人,以下是我为了从预览数据流抓取图片并存储而编写的代码。网上说“从camera读取到的预览(preview)图像流一定要输出到一个可见的(Visible)SurfaceView上,然后通过Camera.PreviewCallback的public void onPreviewFrame(byte[] data, Camera camera)函数来获得图像帧数据的拷贝。”那么显示在SurfaceView上的数据流还是完整的吧,不会出现预览停顿的现象吧?为什么我的程序运行之后,预览一闪而过,然后程序重启,如此反复三遍,报出进程意外停止呢?而且也没有保存到图像。这个程序会以什么频率保存图片啊?onPreviewFrame(byte[] data, Camera camera)是每一帧都保存吗?如何控制保存频率? 求大神细心讲解,在线求教!或者QQ:851391489,隐身状态。拜谢! package com.android.silentcamera; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.Iterator; import java.util.List; import java.util.UUID; import android.app.Activity; import android.content.ContentValues; import android.content.Context; import android.content.res.Configuration; import android.graphics.Bitmap; import android.graphics.Bitmap.CompressFormat; import android.hardware.Camera; import android.hardware.Camera.CameraInfo; import android.hardware.Camera.PreviewCallback; import android.media.AudioManager; import android.net.Uri; import android.os.Bundle; import android.os.Environment; import android.os.Handler; import android.provider.MediaStore.Images.Media; import android.util.Log; import android.view.SurfaceHolder; import android.view.SurfaceHolder.Callback; import android.view.SurfaceView; import android.view.View; import android.view.View.OnClickListener; import android.widget.Button; import android.widget.TextView; import android.widget.Toast; public class SilentCameraActivity extends Activity implements Camera.PreviewCallback, SurfaceHolder.Callback { SurfaceView mSurfaceView; SurfaceHolder mSurfaceHolder; Camera mCamera; public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.main); mSurfaceView = (SurfaceView) this.findViewById(R.id.surfaceview); mSurfaceHolder = mSurfaceView.getHolder(); mSurfaceHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS); mSurfaceHolder.addCallback(this); } @Override public void surfaceDestroyed(SurfaceHolder holder) { // TODO Auto-generated method stub mCamera.stopPreview(); mCamera.release(); } @Override public void surfaceCreated(SurfaceHolder holder) { // TODO Auto-generated method stub //打开前置摄像头 mCamera = Camera.open(CameraInfo.CAMERA_FACING_FRONT); //开始预览 try { //设置哪个surfaceView显示图片 mCamera.setPreviewDisplay(mSurfaceHolder); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } //设置预览帧的接口,就是通过这个接口,我们来获得预览帧的数据的 mCamera.setPreviewCallback(SilentCameraActivity.this); mCamera.startPreview(); } @Override public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) { // TODO Auto-generated method stub mCamera.startPreview(); } //拍照+退出 public void onPreviewFrame(byte[] data, Camera camera) { int w = camera.getParameters().getPreviewSize().width; int h = camera.getParameters().getPreviewSize().height; save(w,h,data); finish(); } static public void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) { final int frameSize = width * height; for (int j = 0, yp = 0; j < height; j++) { int uvp = frameSize + (j >> 1) * width, u = 0, v = 0; for (int i = 0; i < width; i++, yp++) { int y = (0xff & ((int) yuv420sp[yp])) - 16; if (y < 0) y = 0; if ((i & 1) == 0) { v = (0xff & yuv420sp[uvp++]) - 128; u = (0xff & yuv420sp[uvp++]) - 128; } int y1192 = 1192 * y; int r = (y1192 + 1634 * v); int g = (y1192 - 833 * v - 400 * u); int b = (y1192 + 2066 * u); if (r < 0) r = 0; else if (r > 262143) r = 262143; if (g < 0) g = 0; else if (g > 262143) g = 
262143; if (b < 0) b = 0; else if (b > 262143) b = 262143; rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff); } } } //保存 public void save(int w,int h,byte[] data){ //预览编码为YUV420SP的视频流,需转换为RGB编码 int[] RGBData = new int[w* h]; byte[] mYUVData = new byte[data.length]; System.arraycopy(data,0, mYUVData, 0,data.length); decodeYUV420SP(RGBData,mYUVData,w,h); //图片保存到sdcard final Bitmap bitmap = Bitmap.createBitmap(w,h, Bitmap.Config.ARGB_8888); try { FileOutputStream outputStream = new FileOutputStream(Environment.getExternalStorageDirectory().getPath() + UUID.randomUUID() + ".png"); bitmap.compress(CompressFormat.PNG, 50, outputStream); outputStream.close(); } catch(Exception e) { e.printStackTrace(); } } }
Android 采集摄像头数据 , 通过ffmpeg推送流至服务器
Android 采集摄像头数据 , 通过ffmpeg推送流, 通过AdobeMediaServer查看 , 为什么播放视频总是隔一段时间一缓冲 , 而且播放有延迟。求大神帮忙啊 ``` AVFormatContext *ofmt_ctx; AVStream* video_st; //视音频流对应的结构体,用于视音频编解码。 AVCodecContext* pCodecCtx; AVCodec* pCodec; AVPacket enc_pkt; // 存储压缩数据(视频对应H.264等码流数据,音频对应AAC/MP3等码流数据) AVFrame *pFrameYUV; // 存储非压缩的数据(视频对应RGB/YUV像素数据,音频对应PCM采样数据) int framecnt = 0; int yuv_width; int yuv_height; int y_length; int uv_length; int64_t start_time; //const char* out_path = "rtmp://192.168.2.176/live/livestream"; //Output FFmpeg's av_log() void custom_log(void *ptr, int level, const char* fmt, va_list vl) { FILE *fp = fopen("/storage/emulated/0/av_log.txt", "a+"); if (fp) { vfprintf(fp, fmt, vl); fflush(fp); fclose(fp); } } JNIEXPORT jint JNICALL Java_com_zhanghui_test_MainActivity_initial(JNIEnv *env, jobject obj, jint width, jint height) { const char* out_path = "rtmp://192.168.2.176/live/livestream"; yuv_width = width; yuv_height = height; y_length = width * height; uv_length = width * height / 4; //FFmpeg av_log() callback av_log_set_callback(custom_log); av_register_all(); avformat_network_init(); //output initialize avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path); //output encoder initialize //函数的参数是一个解码器的ID,返回查找到的解码器(没有找到就返回NULL)。 pCodec = avcodec_find_encoder(AV_CODEC_ID_H264); if (!pCodec) { LOGE("Can not find encoder!\n"); return -1; } pCodecCtx = avcodec_alloc_context3(pCodec); pCodecCtx->pix_fmt = PIX_FMT_YUV420P; pCodecCtx->width = width; pCodecCtx->height = height; pCodecCtx->time_base.num = 1; pCodecCtx->time_base.den = 25; pCodecCtx->bit_rate = 400000; pCodecCtx->gop_size = 250; /* Some formats want stream headers to be separate. */ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER; //H264 codec param //pCodecCtx->me_range = 16; //pCodecCtx->max_qdiff = 4; //pCodecCtx->qcompress = 0.6; pCodecCtx->qmin = 10; pCodecCtx->qmax = 51; //Optional Param pCodecCtx->max_b_frames = 1; // Set H264 preset and tune AVDictionary *param = 0; // av_dict_set(&param, "preset", "ultrafast", 0); // av_dict_set(&param, "tune", "zerolatency", 0); av_opt_set(pCodecCtx->priv_data, "preset", "superfast", 0); av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0); //打开编码器 if (avcodec_open2(pCodecCtx, pCodec, &param) < 0) { LOGE("Failed to open encoder!\n"); return -1; } //Add a new stream to output,should be called by the user before avformat_write_header() for muxing video_st = avformat_new_stream(ofmt_ctx, pCodec); if (video_st == NULL) { return -1; } video_st->time_base.num = 1; video_st->time_base.den = 25; video_st->codec = pCodecCtx; //Open output URL,set before avformat_write_header() for muxing if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0) { LOGE("Failed to open output file!\n"); return -1; } //Write File Header avformat_write_header(ofmt_ctx, NULL); start_time = av_gettime(); return 0; } JNIEXPORT jint JNICALL Java_com_zhanghui_test_MainActivity_encode(JNIEnv *env, jobject obj, jbyteArray yuv) { int ret; int enc_got_frame = 0; int i = 0; // 为解码帧分配内存 pFrameYUV = avcodec_alloc_frame(); uint8_t *out_buffer = (uint8_t *) av_malloc( avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height)); avpicture_fill((AVPicture *) pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); //安卓摄像头数据为NV21格式,此处将其转换为YUV420P格式 jbyte* in = (jbyte*) (*env)->GetByteArrayElements(env, yuv, 0); memcpy(pFrameYUV->data[0], in, y_length); for (i = 0; i < uv_length; i++) { *(pFrameYUV->data[2] + i) = *(in + y_length + i * 2); 
*(pFrameYUV->data[1] + i) = *(in + y_length + i * 2 + 1); } pFrameYUV->format = AV_PIX_FMT_YUV420P; pFrameYUV->width = yuv_width; pFrameYUV->height = yuv_height; enc_pkt.data = NULL; enc_pkt.size = 0; // 定义AVPacket对象后,请使用av_init_packet进行初始化 av_init_packet(&enc_pkt); /** 编码一帧视频数据 * int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr); 该函数每个参数的含义在注释里面已经写的很清楚了,在这里用中文简述一下: avctx:编码器的AVCodecContext。 avpkt:编码输出的AVPacket。 frame:编码输入的AVFrame。 got_packet_ptr:成功编码一个AVPacket的时候设置为1。 函数返回0代表编码成功。 */ ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame); av_frame_free(&pFrameYUV); if (enc_got_frame == 1) { LOGI("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size); framecnt++; //标识该AVPacket所属的视频/音频流。 enc_pkt.stream_index = video_st->index; //标识该视频/音频流 //Write PTS AVRational time_base = ofmt_ctx->streams[0]->time_base; //{ 1, 1000 }; AVRational r_framerate1 = { 60, 2 }; //{ 50, 2 }; AVRational time_base_q = { 1, AV_TIME_BASE }; //Duration between 2 frames (us) int64_t calc_duration = (double) (AV_TIME_BASE) * (1 / av_q2d(r_framerate1)); //内部时间戳 //Parameters //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base)); enc_pkt.pts = av_rescale_q(framecnt * calc_duration, time_base_q, time_base); enc_pkt.dts = enc_pkt.pts; enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base)); enc_pkt.pos = -1; //Delay int64_t pts_time = av_rescale_q(enc_pkt.dts, time_base, time_base_q); int64_t now_time = av_gettime() - start_time; if (pts_time > now_time) av_usleep(pts_time - now_time); ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt); av_free_packet(&enc_pkt); } // output(ofmt_ctx); return 0; } JNIEXPORT jint JNICALL Java_com_zhanghui_test_MainActivity_flush(JNIEnv *env, jobject obj) { int ret; int got_frame; AVPacket enc_pkt; if (!(ofmt_ctx->streams[0]->codec->codec->capabilities & CODEC_CAP_DELAY)) return 0; while (1) { enc_pkt.data = NULL; enc_pkt.size = 0; av_init_packet(&enc_pkt); ret = avcodec_encode_video2(ofmt_ctx->streams[0]->codec, &enc_pkt, NULL, &got_frame); if (ret < 0) break; if (!got_frame) { ret = 0; break; } LOGI("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size); //Write PTS AVRational time_base = ofmt_ctx->streams[0]->time_base; //{ 1, 1000 }; AVRational r_framerate1 = { 60, 2 }; AVRational time_base_q = { 1, AV_TIME_BASE }; //Duration between 2 frames (us) int64_t calc_duration = (double) (AV_TIME_BASE) * (1 / av_q2d(r_framerate1)); //内部时间戳 //Parameters enc_pkt.pts = av_rescale_q(framecnt * calc_duration, time_base_q, time_base); enc_pkt.dts = enc_pkt.pts; enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //转换PTS/DTS(Convert PTS/DTS) enc_pkt.pos = -1; framecnt++; ofmt_ctx->duration = enc_pkt.duration * framecnt; /* mux encoded frame */ ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt); if (ret < 0) break; } //Write file trailer av_write_trailer(ofmt_ctx); return 0; } JNIEXPORT jint JNICALL Java_com_zhanghui_test_MainActivity_close(JNIEnv *env, jobject obj) { if (video_st) avcodec_close(video_st->codec); avio_close(ofmt_ctx->pb); avformat_free_context(ofmt_ctx); return 0; } ```
“Unfortunately, app name has stopped.”android设备或者是模拟器
我在根据一些教程来创建app,当我想要运行app的时候,没有显示错误,但是在emulator显示"Unfortunately, Viewfinder blabla has stopped." 代码: AndroidManifest.xml file <manifest xmlns:android="http://schemas.android.com/apk/res/android" package="com.example.viewfinderee368" android:versionCode="1" android:versionName="1.0"> <application android:icon="@drawable/icon" android:label="@string/app_name"> <activity android:name=".ViewfinderEE368" android:label="@string/app_name" android:screenOrientation="landscape"> <intent-filter> <action android:name="android.intent.action.MAIN"/> <category android:name="android.intent.category.LAUNCHER"/> </intent-filter> </activity> </application> <uses-sdk android:minSdkVersion="6"/> <uses-permission android:name="android.permission.CAMERA"/> <uses-feature android:name="android.hardware.camera"/> <uses-feature android:name="android.hardware.camera.autofocus"/> </manifest> src/.java file package com.example.viewfinderee368; import java.io.IOException; import android.app.Activity; import android.content.Context; import android.graphics.Bitmap; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; import android.graphics.Rect; import android.graphics.RectF; import android.hardware.Camera; import android.hardware.Camera.PreviewCallback; import android.os.Bundle; import android.view.SurfaceHolder; import android.view.SurfaceView; import android.view.View; import android.view.Window; import android.view.WindowManager; import android.view.ViewGroup.LayoutParams; // ---------------------------------------------------------------------- public class ViewfinderEE368 extends Activity { private Preview mPreview; private DrawOnTop mDrawOnTop; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); // 隐藏窗口标题H getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN); requestWindowFeature(Window.FEATURE_NO_TITLE); //创建预览视图然后将它设置为activity的目录 //创建DrawOnTop视图. 
mDrawOnTop = new DrawOnTop(this); mPreview = new Preview(this, mDrawOnTop); setContentView(mPreview); addContentView(mDrawOnTop, new LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT)); } } //---------------------------------------------------------------------- class DrawOnTop extends View { Bitmap mBitmap; Paint mPaintBlack; Paint mPaintYellow; Paint mPaintRed; Paint mPaintGreen; Paint mPaintBlue; byte[] mYUVData; int[] mRGBData; int mImageWidth, mImageHeight; int[] mRedHistogram; int[] mGreenHistogram; int[] mBlueHistogram; double[] mBinSquared; public DrawOnTop(Context context) { super(context); mPaintBlack = new Paint(); mPaintBlack.setStyle(Paint.Style.FILL); mPaintBlack.setColor(Color.BLACK); mPaintBlack.setTextSize(25); mPaintYellow = new Paint(); mPaintYellow.setStyle(Paint.Style.FILL); mPaintYellow.setColor(Color.YELLOW); mPaintYellow.setTextSize(25); mPaintRed = new Paint(); mPaintRed.setStyle(Paint.Style.FILL); mPaintRed.setColor(Color.RED); mPaintRed.setTextSize(25); mPaintGreen = new Paint(); mPaintGreen.setStyle(Paint.Style.FILL); mPaintGreen.setColor(Color.GREEN); mPaintGreen.setTextSize(25); mPaintBlue = new Paint(); mPaintBlue.setStyle(Paint.Style.FILL); mPaintBlue.setColor(Color.BLUE); mPaintBlue.setTextSize(25); mBitmap = null; mYUVData = null; mRGBData = null; mRedHistogram = new int[256]; mGreenHistogram = new int[256]; mBlueHistogram = new int[256]; mBinSquared = new double[256]; for (int bin = 0; bin < 256; bin++) { mBinSquared[bin] = ((double)bin) * bin; } // bin } @Override protected void onDraw(Canvas canvas) { if (mBitmap != null) { int canvasWidth = canvas.getWidth(); int canvasHeight = canvas.getHeight(); int newImageWidth = canvasWidth; int newImageHeight = canvasHeight; int marginWidth = (canvasWidth - newImageWidth)/2; // 从YUV转换成RGB decodeYUV420SP(mRGBData, mYUVData, mImageWidth, mImageHeight); // 画位图 // mBitmap.setPixels(mRGBData, 0, mImageWidth, 0, 0, // mImageWidth, mImageHeight); // Rect src = new Rect(0, 0, mImageWidth, mImageHeight); // Rect dst = new Rect(marginWidth, 0, // canvasWidth-marginWidth, canvasHeight); // canvas.drawBitmap(mBitmap, src, dst, mPaintBlack); // 画黑色边缘 // canvas.drawRect(0, 0, marginWidth, canvasHeight, mPaintBlack); // canvas.drawRect(canvasWidth - marginWidth, 0, // canvasWidth, canvasHeight, mPaintBlack); // 计算直方图 calculateIntensityHistogram(mRGBData, mRedHistogram, mImageWidth, mImageHeight, 0); calculateIntensityHistogram(mRGBData, mGreenHistogram, mImageWidth, mImageHeight, 1); calculateIntensityHistogram(mRGBData, mBlueHistogram, mImageWidth, mImageHeight, 2); // 计算平均值 double imageRedMean = 0, imageGreenMean = 0, imageBlueMean = 0; double redHistogramSum = 0, greenHistogramSum = 0, blueHistogramSum = 0; for (int bin = 0; bin < 256; bin++) { imageRedMean += mRedHistogram[bin] * bin; redHistogramSum += mRedHistogram[bin]; imageGreenMean += mGreenHistogram[bin] * bin; greenHistogramSum += mGreenHistogram[bin]; imageBlueMean += mBlueHistogram[bin] * bin; blueHistogramSum += mBlueHistogram[bin]; } // bin imageRedMean /= redHistogramSum; imageGreenMean /= greenHistogramSum; imageBlueMean /= blueHistogramSum; double imageRed2ndMoment = 0, imageGreen2ndMoment = 0, imageBlue2ndMoment = 0; for (int bin = 0; bin < 256; bin++) { imageRed2ndMoment += mRedHistogram[bin] * mBinSquared[bin]; imageGreen2ndMoment += mGreenHistogram[bin] * mBinSquared[bin]; imageBlue2ndMoment += mBlueHistogram[bin] * mBinSquared[bin]; } // bin imageRed2ndMoment /= redHistogramSum; imageGreen2ndMoment /= greenHistogramSum; imageBlue2ndMoment 
/= blueHistogramSum; double imageRedStdDev = Math.sqrt( imageRed2ndMoment - imageRedMean*imageRedMean ); double imageGreenStdDev = Math.sqrt( imageGreen2ndMoment - imageGreenMean*imageGreenMean ); double imageBlueStdDev = Math.sqrt( imageBlue2ndMoment - imageBlueMean*imageBlueMean ); // 画平均值 String imageMeanStr = "Mean (R,G,B): " + String.format("%.4g", imageRedMean) + ", " + String.format("%.4g", imageGreenMean) + ", " + String.format("%.4g", imageBlueMean); canvas.drawText(imageMeanStr, marginWidth+10-1, 30-1, mPaintBlack); canvas.drawText(imageMeanStr, marginWidth+10+1, 30-1, mPaintBlack); canvas.drawText(imageMeanStr, marginWidth+10+1, 30+1, mPaintBlack); canvas.drawText(imageMeanStr, marginWidth+10-1, 30+1, mPaintBlack); canvas.drawText(imageMeanStr, marginWidth+10, 30, mPaintYellow); // 画标准差 String imageStdDevStr = "Std Dev (R,G,B): " + String.format("%.4g", imageRedStdDev) + ", " + String.format("%.4g", imageGreenStdDev) + ", " + String.format("%.4g", imageBlueStdDev); canvas.drawText(imageStdDevStr, marginWidth+10-1, 60-1, mPaintBlack); canvas.drawText(imageStdDevStr, marginWidth+10+1, 60-1, mPaintBlack); canvas.drawText(imageStdDevStr, marginWidth+10+1, 60+1, mPaintBlack); canvas.drawText(imageStdDevStr, marginWidth+10-1, 60+1, mPaintBlack); canvas.drawText(imageStdDevStr, marginWidth+10, 60, mPaintYellow); // 画红色直方图 float barMaxHeight = 3000; float barWidth = ((float)newImageWidth) / 256; float barMarginHeight = 2; RectF barRect = new RectF(); barRect.bottom = canvasHeight - 200; barRect.left = marginWidth; barRect.right = barRect.left + barWidth; for (int bin = 0; bin < 256; bin++) { float prob = (float)mRedHistogram[bin] / (float)redHistogramSum; barRect.top = barRect.bottom - Math.min(80,prob*barMaxHeight) - barMarginHeight; canvas.drawRect(barRect, mPaintBlack); barRect.top += barMarginHeight; canvas.drawRect(barRect, mPaintRed); barRect.left += barWidth; barRect.right += barWidth; } // bin // 画绿色直方图 barRect.bottom = canvasHeight - 100; barRect.left = marginWidth; barRect.right = barRect.left + barWidth; for (int bin = 0; bin < 256; bin++) { barRect.top = barRect.bottom - Math.min(80, ((float)mGreenHistogram[bin])/((float)greenHistogramSum) * barMaxHeight) - barMarginHeight; canvas.drawRect(barRect, mPaintBlack); barRect.top += barMarginHeight; canvas.drawRect(barRect, mPaintGreen); barRect.left += barWidth; barRect.right += barWidth; } // bin // 画蓝色直方图 barRect.bottom = canvasHeight; barRect.left = marginWidth; barRect.right = barRect.left + barWidth; for (int bin = 0; bin < 256; bin++) { barRect.top = barRect.bottom - Math.min(80, ((float)mBlueHistogram[bin])/((float)blueHistogramSum) * barMaxHeight) - barMarginHeight; canvas.drawRect(barRect, mPaintBlack); barRect.top += barMarginHeight; canvas.drawRect(barRect, mPaintBlue); barRect.left += barWidth; barRect.right += barWidth; } // bin } //结束条件语句 super.onDraw(canvas); } // 结束onDraw方法 static public void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) { final int frameSize = width * height; for (int j = 0, yp = 0; j < height; j++) { int uvp = frameSize + (j >> 1) * width, u = 0, v = 0; for (int i = 0; i < width; i++, yp++) { int y = (0xff & ((int) yuv420sp[yp])) - 16; if (y < 0) y = 0; if ((i & 1) == 0) { v = (0xff & yuv420sp[uvp++]) - 128; u = (0xff & yuv420sp[uvp++]) - 128; } int y1192 = 1192 * y; int r = (y1192 + 1634 * v); int g = (y1192 - 833 * v - 400 * u); int b = (y1192 + 2066 * u); if (r < 0) r = 0; else if (r > 262143) r = 262143; if (g < 0) g = 0; else if (g > 262143) g = 262143; if (b < 0) b = 0; 
else if (b > 262143) b = 262143; rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff); } } } static public void decodeYUV420SPGrayscale(int[] rgb, byte[] yuv420sp, int width, int height) { final int frameSize = width * height; for (int pix = 0; pix < frameSize; pix++) { int pixVal = (0xff & ((int) yuv420sp[pix])) - 16; if (pixVal < 0) pixVal = 0; if (pixVal > 255) pixVal = 255; rgb[pix] = 0xff000000 | (pixVal << 16) | (pixVal << 8) | pixVal; } } static public void calculateIntensityHistogram(int[] rgb, int[] histogram, int width, int height, int component) { for (int bin = 0; bin < 256; bin++) { histogram[bin] = 0; } // bin if (component == 0) // red { for (int pix = 0; pix < width*height; pix += 3) { int pixVal = (rgb[pix] >> 16) & 0xff; histogram[ pixVal ]++; } // pix } else if (component == 1) // green { for (int pix = 0; pix < width*height; pix += 3) { int pixVal = (rgb[pix] >> 8) & 0xff; histogram[ pixVal ]++; } // pix } else // blue { for (int pix = 0; pix < width*height; pix += 3) { int pixVal = rgb[pix] & 0xff; histogram[ pixVal ]++; } // pix } } } // ---------------------------------------------------------------------- class Preview extends SurfaceView implements SurfaceHolder.Callback { SurfaceHolder mHolder; Camera mCamera; DrawOnTop mDrawOnTop; boolean mFinished; Preview(Context context, DrawOnTop drawOnTop) { super(context); mDrawOnTop = drawOnTop; mFinished = false; // 安装SurfaceHolder.Callback以便当下垫面被创建和销毁的时候我们能够获得通知 mHolder = getHolder(); mHolder.addCallback(this); mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS); } public void surfaceCreated(SurfaceHolder holder) { mCamera = Camera.open(); try { mCamera.setPreviewDisplay(holder); mCamera.setPreviewCallback(new PreviewCallback() { public void onPreviewFrame(byte[] data, Camera camera) { if ( (mDrawOnTop == null) || mFinished ) return; if (mDrawOnTop.mBitmap == null) { // 初始化draw-on-top companion Camera.Parameters params = camera.getParameters(); mDrawOnTop.mImageWidth = params.getPreviewSize().width; mDrawOnTop.mImageHeight = params.getPreviewSize().height; mDrawOnTop.mBitmap = Bitmap.createBitmap(mDrawOnTop.mImageWidth, mDrawOnTop.mImageHeight, Bitmap.Config.RGB_565); mDrawOnTop.mRGBData = new int[mDrawOnTop.mImageWidth * mDrawOnTop.mImageHeight]; mDrawOnTop.mYUVData = new byte[data.length]; } System.arraycopy(data, 0, mDrawOnTop.mYUVData, 0, data.length); mDrawOnTop.invalidate(); } }); } catch (IOException exception) { mCamera.release(); mCamera = null; } } public void surfaceDestroyed(SurfaceHolder holder) { // 但给我们返回的时候,Surface将被摧毁,所以停止预览 // 因为CameraDevice object不是共享的对象,所以当activity停止的时候要释放它。这个很重要。 mFinished = true; mCamera.setPreviewCallback(null); mCamera.stopPreview(); mCamera.release(); mCamera = null; } public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) { // 现在这个大小是知道的,设置相机参数然后开始预览 Camera.Parameters parameters = mCamera.getParameters(); parameters.setPreviewSize(320, 240); parameters.setPreviewFrameRate(15); parameters.setSceneMode(Camera.Parameters.SCENE_MODE_NIGHT); parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_AUTO); mCamera.setParameters(parameters); mCamera.startPreview(); } } 我尝试在真机下和模拟器下运行,但是错误都一样 LogCat 11-20 02:50:38.943: E/AndroidRuntime(614): FATAL EXCEPTION: main 11-20 02:50:38.943: E/AndroidRuntime(614): java.lang.RuntimeException: Unable to start activity ComponentInfo{com.example.viewfinderee368/com.example.viewfinderee368.ViewfinderEE368}: android.util.AndroidRuntimeException: requestFeature() must be called before adding 
content 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2059) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:2084) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.ActivityThread.access$600(ActivityThread.java:130) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.ActivityThread$H.handleMessage(ActivityThread.java:1195) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.os.Handler.dispatchMessage(Handler.java:99) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.os.Looper.loop(Looper.java:137) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.ActivityThread.main(ActivityThread.java:4745) 11-20 02:50:38.943: E/AndroidRuntime(614): at java.lang.reflect.Method.invokeNative(Native Method) 11-20 02:50:38.943: E/AndroidRuntime(614): at java.lang.reflect.Method.invoke(Method.java:511) 11-20 02:50:38.943: E/AndroidRuntime(614): at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:786) 11-20 02:50:38.943: E/AndroidRuntime(614): at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:553) 11-20 02:50:38.943: E/AndroidRuntime(614): at dalvik.system.NativeStart.main(Native Method) 11-20 02:50:38.943: E/AndroidRuntime(614): Caused by: android.util.AndroidRuntimeException: requestFeature() must be called before adding content 11-20 02:50:38.943: E/AndroidRuntime(614): at com.android.internal.policy.impl.PhoneWindow.requestFeature(PhoneWindow.java:215) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.Activity.requestWindowFeature(Activity.java:3225) 11-20 02:50:38.943: E/AndroidRuntime(614): at com.example.viewfinderee368.ViewfinderEE368.onCreate(ViewfinderEE368.java:52) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.Activity.performCreate(Activity.java:5008) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.Instrumentation.callActivityOnCreate(Instrumentation.java:1079) 11-20 02:50:38.943: E/AndroidRuntime(614): at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2023) 11-20 02:50:38.943: E/AndroidRuntime(614): ... 11 more
directShow操作demo,获取视频信息时报错,尝试读取或写入受保护的内存
WinStructs.VIDEOINFOHEADER videoInfo = (WinStructs.VIDEOINFOHEADER)Marshal.PtrToStructure(mediaType.pbFormat, typeof(WinStructs.VIDEOINFOHEADER)); 其中mediaType.pbFormat是获取到的视频指针,转换成WinStructs.VIDEOINFOHEADER结构体 WinStructs.VIDEOINFOHEADER为创建好的结构体内容如下: public class WinStructs { /// <summary> /// The VIDEOINFOHEADER structure describes the bitmap and color information for a video image /// </summary> [StructLayout(LayoutKind.Sequential)] public struct VIDEOINFOHEADER { /// <summary>RECT structure that specifies the source video window. This structure can be a clipping rectangle, to select a portion of the source video stream.</summary> public RECT rcSource; /// <summary>RECT structure that specifies the destination video window.</summary> public RECT rcTarget; /// <summary>Approximate data rate of the video stream, in bits per second</summary> public uint dwBitRate; /// <summary>Data error rate, in bit errors per second</summary> public uint dwBitErrorRate; /// <summary>The desired average display time of the video frames, in 100-nanosecond units. The actual time per frame may be longer. See Remarks.</summary> public long AvgTimePerFrame; /// <summary>BITMAPINFOHEADER structure that contains color and dimension information for the video image bitmap. If the format block contains a color table or color masks, they immediately follow the bmiHeader member. You can get the first color entry by casting the address of member to a BITMAPINFO pointer</summary> public BITMAPINFOHEADER bmiHeader; } [StructLayout(LayoutKind.Sequential)] public struct RECT { int left; int top; int right; int bottom; } /// <summary> /// The BITMAPINFOHEADER structure contains information about the dimensions and color format of a device-independent bitmap (DIB). /// SEE MSDN /// </summary> [StructLayout(LayoutKind.Sequential)] public struct BITMAPINFOHEADER { /// <summary>Specifies the number of bytes required by the structure. This value does not include the size of the color table or the size of the color masks, if they are appended to the end of structure. See Remarks.</summary> public uint biSize; /// <summary>Specifies the width of the bitmap, in pixels. For information about calculating the stride of the bitmap, see Remarks.</summary> public int biWidth; /// <summary>Specifies the height of the bitmap, in pixels. SEE MSDN</summary> public int biHeight; /// <summary>Specifies the number of planes for the target device. This value must be set to 1</summary> public ushort biPlanes; /// <summary>Specifies the number of bits per pixel (bpp). For uncompressed formats, this value gives to the average number of bits per pixel. For compressed formats, this value gives the implied bit depth of the uncompressed image, after the image has been decoded.</summary> public ushort biBitCount; /// <summary>For compressed video and YUV formats, this member is a FOURCC code, specified as a DWORD in little-endian order. For example, YUYV video has the FOURCC 'VYUY' or 0x56595559. SEE MSDN</summary> public uint biCompression; /// <summary>Specifies the size, in bytes, of the image. 
This can be set to 0 for uncompressed RGB bitmaps</summary> public uint biSizeImage; /// <summary>Specifies the horizontal resolution, in pixels per meter, of the target device for the bitmap</summary> public int biXPelsPerMeter; /// <summary>Specifies the vertical resolution, in pixels per meter, of the target device for the bitmap</summary> public int biYPelsPerMeter; /// <summary>Specifies the number of color indices in the color table that are actually used by the bitmap. See Remarks for more information.</summary> public uint biClrUsed; /// <summary>Specifies the number of color indices that are considered important for displaying the bitmap. If this value is zero, all colors are important</summary> public uint biClrImportant; } } 请问哪位大神编一个这个关于视频获取指针的directShow操作demo,编译是可以通过,只是在获取视频信息时报错!网上查说是类型没有指定长度!可长度该怎么指定呢?请大神指教……
x64环境下,把内嵌汇编的汇编单独放在.asm文件中
void GDIRender::YUV_TO_RGB24(unsigned char *puc_y, int stride_y, unsigned char *puc_u, unsigned char *puc_v, int stride_uv, unsigned char *puc_out, int width_y, int height_y,int stride_out) { int y, horiz_count; unsigned char *puc_out_remembered; if (height_y < 0) { /* we are flipping our output upside-down */ height_y = -height_y; puc_y += (height_y - 1) * stride_y ; puc_u += (height_y/2 - 1) * stride_uv; puc_v += (height_y/2 - 1) * stride_uv; stride_y = -stride_y; stride_uv = -stride_uv; } horiz_count = -(width_y >> 3); for (y=0; y<height_y; y++) { if (y == height_y-1) { /* this is the last output line - we need to be careful not to overrun the end of this line */ unsigned char temp_buff[3*MAXIMUM_Y_WIDTH+1]; puc_out_remembered = puc_out; puc_out = temp_buff; /* write the RGB to a temporary store */ } _asm { push eax push ebx push ecx push edx push edi mov eax, puc_out mov ebx, puc_y mov ecx, puc_u mov edx, puc_v mov edi, horiz_count horiz_loop: movd mm2, [ecx] pxor mm7, mm7 movd mm3, [edx] punpcklbw mm2, mm7 ; mm2 = __u3__u2__u1__u0 movq mm0, [ebx] ; mm0 = y7y6y5y4y3y2y1y0 punpcklbw mm3, mm7 ; mm3 = __v3__v2__v1__v0 movq mm1, mmw_0x00ff ; mm1 = 00ff00ff00ff00ff psubusb mm0, mmb_0x10 ; mm0 -= 16 psubw mm2, mmw_0x0080 ; mm2 -= 128 pand mm1, mm0 ; mm1 = __y6__y4__y2__y0 psubw mm3, mmw_0x0080 ; mm3 -= 128 psllw mm1, 3 ; mm1 *= 8 psrlw mm0, 8 ; mm0 = __y7__y5__y3__y1 psllw mm2, 3 ; mm2 *= 8 pmulhw mm1, mmw_mult_Y ; mm1 *= luma coeff psllw mm0, 3 ; mm0 *= 8 psllw mm3, 3 ; mm3 *= 8 movq mm5, mm3 ; mm5 = mm3 = v pmulhw mm5, mmw_mult_V_R ; mm5 = red chroma movq mm4, mm2 ; mm4 = mm2 = u pmulhw mm0, mmw_mult_Y ; mm0 *= luma coeff movq mm7, mm1 ; even luma part pmulhw mm2, mmw_mult_U_G ; mm2 *= u green coeff paddsw mm7, mm5 ; mm7 = luma + chroma __r6__r4__r2__r0 pmulhw mm3, mmw_mult_V_G ; mm3 *= v green coeff packuswb mm7, mm7 ; mm7 = r6r4r2r0r6r4r2r0 pmulhw mm4, mmw_mult_U_B ; mm4 = blue chroma paddsw mm5, mm0 ; mm5 = luma + chroma __r7__r5__r3__r1 packuswb mm5, mm5 ; mm6 = r7r5r3r1r7r5r3r1 paddsw mm2, mm3 ; mm2 = green chroma movq mm3, mm1 ; mm3 = __y6__y4__y2__y0 movq mm6, mm1 ; mm6 = __y6__y4__y2__y0 paddsw mm3, mm4 ; mm3 = luma + chroma __b6__b4__b2__b0 paddsw mm6, mm2 ; mm6 = luma + chroma __g6__g4__g2__g0 punpcklbw mm7, mm5 ; mm7 = r7r6r5r4r3r2r1r0 paddsw mm2, mm0 ; odd luma part plus chroma part __g7__g5__g3__g1 packuswb mm6, mm6 ; mm2 = g6g4g2g0g6g4g2g0 packuswb mm2, mm2 ; mm2 = g7g5g3g1g7g5g3g1 packuswb mm3, mm3 ; mm3 = b6b4b2b0b6b4b2b0 paddsw mm4, mm0 ; odd luma part plus chroma part __b7__b5__b3__b1 packuswb mm4, mm4 ; mm4 = b7b5b3b1b7b5b3b1 punpcklbw mm6, mm2 ; mm6 = g7g6g5g4g3g2g1g0 punpcklbw mm3, mm4 ; mm3 = b7b6b5b4b3b2b1b0 /* 32-bit shuffle.... */ pxor mm0, mm0 ; is this needed? movq mm1, mm6 ; mm1 = g7g6g5g4g3g2g1g0 punpcklbw mm1, mm0 ; mm1 = __g3__g2__g1__g0 movq mm0, mm3 ; mm0 = b7b6b5b4b3b2b1b0 punpcklbw mm0, mm7 ; mm0 = r3b3r2b2r1b1r0b0 movq mm2, mm0 ; mm2 = r3b3r2b2r1b1r0b0 punpcklbw mm0, mm1 ; mm0 = __r1g1b1__r0g0b0 punpckhbw mm2, mm1 ; mm2 = __r3g3b3__r2g2b2 /* 24-bit shuffle and save... */ movd [eax], mm0 ; eax[0] = __r0g0b0 psrlq mm0, 32 ; mm0 = __r1g1b1 movd 3[eax], mm0 ; eax[3] = __r1g1b1 movd 6[eax], mm2 ; eax[6] = __r2g2b2 psrlq mm2, 32 ; mm2 = __r3g3b3 movd 9[eax], mm2 ; eax[9] = __r3g3b3 /* 32-bit shuffle.... */ pxor mm0, mm0 ; is this needed? 
movq mm1, mm6 ; mm1 = g7g6g5g4g3g2g1g0 punpckhbw mm1, mm0 ; mm1 = __g7__g6__g5__g4 movq mm0, mm3 ; mm0 = b7b6b5b4b3b2b1b0 punpckhbw mm0, mm7 ; mm0 = r7b7r6b6r5b5r4b4 movq mm2, mm0 ; mm2 = r7b7r6b6r5b5r4b4 punpcklbw mm0, mm1 ; mm0 = __r5g5b5__r4g4b4 punpckhbw mm2, mm1 ; mm2 = __r7g7b7__r6g6b6 /* 24-bit shuffle and save... */ movd 12[eax], mm0 ; eax[12] = __r4g4b4 psrlq mm0, 32 ; mm0 = __r5g5b5 movd 15[eax], mm0 ; eax[15] = __r5g5b5 add ebx, 8 ; puc_y += 8; movd 18[eax], mm2 ; eax[18] = __r6g6b6 psrlq mm2, 32 ; mm2 = __r7g7b7 add ecx, 4 ; puc_u += 4; add edx, 4 ; puc_v += 4; movd 21[eax], mm2 ; eax[21] = __r7g7b7 add eax, 24 ; puc_out += 24 inc edi jne horiz_loop pop edi pop edx pop ecx pop ebx pop eax emms } if (y == height_y-1) { /* last line of output - we have used the temp_buff and need to copy... */ int x = 3 * width_y; /* interation counter */ unsigned char *ps = puc_out; /* source pointer (temporary line store) */ unsigned char *pd = puc_out_remembered; /* dest pointer */ while (x--) *(pd++) = *(ps++); /* copy the line */ } puc_y += stride_y; if (y%2) { puc_u += stride_uv; puc_v += stride_uv; } puc_out += stride_out; } } 上面是视频解码的视频格式转换的代码,由于x64不支持内嵌汇编,在网上也找了关于移植的文件,我的没有任何汇编基础,还请会的帮我转下汇编,就是把内嵌汇编这段单独挡在一个文件(.asm)中,然后能编译通过,我用的环境是vs2010的x64 ,请知道帮下忙,谢谢
别人给的各种运动目标检测方法的代码,不知道少了什么东西一直调不通
1. I'm using VS2013 with OpenCV 3.4.0 configured. When I build the program below, the compiler reports a long list of undefined identifiers. I've puzzled over it for ages and still can't find where the problem is. Could someone take a look at the code and tell me what's wrong? If needed I can send over the whole project (paid).

```
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/core/core.hpp>
#include <stdio.h>
#include <stdlib.h>
//#include "cvaux.h"
#include "codeb.h"

int CVCONTOUR_APPROX_LEVEL = 2;
int CVCLOSE_ITR = 1;
#define CV_CVX_WHITE CV_RGB(0xff,0xff,0xff)
#define CV_CVX_BLACK CV_RGB(0x00,0x00,0x00)

codeBook* cA;
codeBook* cC;
codeBook* cD;
int maxMod[CHANNELS];
int minMod[CHANNELS];
unsigned cbBounds[CHANNELS];
bool ch[CHANNELS];
int nChannels = CHANNELS;
int imageLen = 0;
uchar *pColor;
int Td;
int Tadd;
int Tdel;
int T = 50;
int Fadd = 35;
int Tavgstale = 50;
int Fd = 2;
int Tavgstale_cD = 50;
int fgcount = 0;
float beta = 0.1f;
float gamma = 0.1f;
float forgratio = 0.0f;
float Tadap_update = 0.4f;

int clear_stale_entries(codeBook &c);
uchar background_Diff(uchar *p, codeBook &c, int numChannels, int *minMod, int *maxMod);
int update_codebook_model(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels);
int trainig_codebook(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels);
int training_clear_stale_entries(codeBook &c);
int det_update_codebook_cC(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels);
int det_update_codebook_cD(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels, int numframe);
int realtime_clear_stale_entries_cC(codeBook &c, int FrmNum);
int realtime_clear_stale_entries_cD(codeBook &c, int FrmNum);
int cD_to_cC(codeBook &d, codeBook &c, int FrmNum);
uchar background_diff_realtime(uchar* p, codeBook& c, int numChannels, int* minMod, int* maxMod);

void help()
{
    printf("***Keep the focus on the video windows, NOT the consol***\n"
        "INTERACTIVE PARAMETERS:\n"
        "\tESC,q,Q - quit the program\n"
        "\th - print this help\n"
        "\tp - pause toggle\n"
        "\ts - single step\n"
        "\tr - run mode (single step off)\n"
        "=== CODEBOOK PARAMS ===\n"
        "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
        "\ta - adjust all 3 channels at once\n"
        "\tb - adjust both 2 and 3 at once\n"
        "\ti,o - bump upper threshold up,down by 1\n"
        "\tk,l - bump lower threshold up,down by 1\n"
        "\tz,x - bump Fadd threshold up,down by 1\n"
        "\tn,m - bump Tavgstale threshold up,down by 1\n"
        "\t Fadd小更新快,Tavgstale大更新快\n");
}

int count_Segmentation(codeBook *c, IplImage *I, int numChannels, int *minMod, int *maxMod)
{
    int count = 0, i;
    uchar *pColor;
    int imageLen = I->width * I->height;
    // GET BASELINE NUMBER OF FG PIXELS FOR Iraw
    pColor = (uchar *)((I)->imageData);
    for (i = 0; i < imageLen; i++) {
        if (background_Diff(pColor, c[i], numChannels, minMod, maxMod)) count++;
        pColor += 3;
    }
    fgcount = count;
    return(fgcount);
}

void connected_Components(IplImage *mask, int poly1_hull0, float perimScale, int *num, CvRect *bbs, CvPoint *centers)
{
    static CvMemStorage* mem_storage = NULL;
    static CvSeq* contours = NULL;
    // CLEAN UP RAW MASK
    cvMorphologyEx(mask, mask, NULL, NULL, CV_MOP_OPEN, CVCLOSE_ITR);
    cvMorphologyEx(mask, mask, NULL, NULL, CV_MOP_CLOSE, CVCLOSE_ITR);
    // FIND CONTOURS AROUND ONLY BIGGER REGIONS
    if (mem_storage == NULL) mem_storage = cvCreateMemStorage(0);
    else cvClearMemStorage(mem_storage);
    CvContourScanner scanner = cvStartFindContours(mask, mem_storage, sizeof(CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    CvSeq* c;
    int numCont = 0;
    while ((c = cvFindNextContour(scanner)) != NULL) {
        double len = cvContourPerimeter(c);
        double q = (mask->height + mask->width) / perimScale; // calculate perimeter len threshold
        if (len < q) { // Get rid of blob if its perimeter is too small
            cvSubstituteContour(scanner, NULL);
        }
        else { // Smooth its edges if it's large enough
            CvSeq* c_new;
            if (poly1_hull0) // Polygonal approximation of the segmentation
                c_new = cvApproxPoly(c, sizeof(CvContour), mem_storage, CV_POLY_APPROX_DP, CVCONTOUR_APPROX_LEVEL, 0);
            else // Convex Hull of the segmentation
                c_new = cvConvexHull2(c, mem_storage, CV_CLOCKWISE, 1);
            cvSubstituteContour(scanner, c_new);
            numCont++;
        }
    }
    contours = cvEndFindContours(&scanner);
    // PAINT THE FOUND REGIONS BACK INTO THE IMAGE
    cvZero(mask);
    IplImage *maskTemp;
    // CALC CENTER OF MASS AND OR BOUNDING RECTANGLES
    if (num != NULL) {
        int N = *num, numFilled = 0, i = 0;
        CvMoments moments;
        double M00, M01, M10;
        maskTemp = cvCloneImage(mask);
        for (i = 0, c = contours; c != NULL; c = c->h_next, i++) {
            if (i < N) { // Only process up to *num of them
                cvDrawContours(maskTemp, c, CV_CVX_WHITE, CV_CVX_WHITE, -1, CV_FILLED, 8);
                // Find the center of each contour
                if (centers != NULL) {
                    cvMoments(maskTemp, &moments, 1);
                    M00 = cvGetSpatialMoment(&moments, 0, 0);
                    M10 = cvGetSpatialMoment(&moments, 1, 0);
                    M01 = cvGetSpatialMoment(&moments, 0, 1);
                    centers[i].x = (int)(M10 / M00);
                    centers[i].y = (int)(M01 / M00);
                }
                // Bounding rectangles around blobs
                if (bbs != NULL) {
                    bbs[i] = cvBoundingRect(c);
                }
                cvZero(maskTemp);
                numFilled++;
            }
            // Draw filled contours into mask
            cvDrawContours(mask, c, CV_CVX_WHITE, CV_CVX_WHITE, -1, CV_FILLED, 8); // draw to central mask
        } // end looping over contours
        *num = numFilled;
        cvReleaseImage(&maskTemp);
    }
    else {
        for (c = contours; c != NULL; c = c->h_next) {
            cvDrawContours(mask, c, CV_CVX_WHITE, CV_CVX_BLACK, -1, CV_FILLED, 8);
        }
    }
}

////////////////////////////
int main(int argc, char** argv)
{
    IplImage* temp1 = NULL;
    IplImage* temp2 = NULL;
    IplImage* result = NULL;
    IplImage* result1 = NULL;
    IplImage* result2 = NULL;
    CvBGStatModel* bg_model = 0;
    CvBGStatModel* bg_model1 = 0;
    IplImage* rawImage = 0;
    IplImage* yuvImage = 0;
    IplImage* rawImage1 = 0;
    IplImage* pFrImg = 0;
    IplImage* pFrImg1 = 0;
    IplImage* pFrImg2 = 0;
    IplImage* ImaskCodeBookCC = 0;
    CvCapture* capture = 0;
    int c, n;
    maxMod[0] = 25; minMod[0] = 35;
    maxMod[1] = 8;  minMod[1] = 8;
    maxMod[2] = 8;  minMod[2] = 8;
    argc = 2;
    argv[1] = "intelligentroom_raw.avi";
    if (argc > 2) {
        fprintf(stderr, "Usage: bkgrd [video_file_name]\n");
        return -1;
    }
    if (argc == 1)
        if (!(capture = cvCaptureFromCAM(-1))) {
            fprintf(stderr, "Can not open camera.\n");
            return -2;
        }
    if (argc == 2)
        if (!(capture = cvCaptureFromFile(argv[1]))) {
            fprintf(stderr, "Can not open video file %s\n", argv[1]);
            return -2;
        }
    bool pause = false;
    bool singlestep = false;
    if (capture) {
        cvNamedWindow("原视频序列图像", 1);
        cvNamedWindow("不实时更新的Codebook算法[本文]", 1);
        cvNamedWindow("实时更新的Codebook算法[本文]", 1);
        cvNamedWindow("基于MOG的方法[Chris Stauffer'2001]", 1);
        cvNamedWindow("三帧差分", 1);
        cvNamedWindow("基于Bayes decision的方法[Liyuan Li'2003]", 1);
        cvMoveWindow("原视频序列图像", 0, 0);
        cvMoveWindow("不实时更新的Codebook算法[本文]", 360, 0);
        cvMoveWindow("实时更新的Codebook算法[本文]", 720, 350);
        cvMoveWindow("基于MOG的方法[Chris Stauffer'2001]", 0, 350);
        cvMoveWindow("三帧差分", 720, 0);
        cvMoveWindow("基于Bayes decision的方法[Liyuan Li'2003]", 360, 350);
        int nFrmNum = -1;
        for (;;) {
            if (!pause) {
                rawImage = cvQueryFrame(capture);
                ++nFrmNum;
                printf("第%d帧\n", nFrmNum);
                if (!rawImage) break;
            }
            if (singlestep) {
                pause = true;
            }
            if (0 == nFrmNum) {
                printf(". . . wait for it . . .\n");
                temp1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                temp2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                result1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                result = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                bg_model = cvCreateGaussianBGModel(rawImage);
                bg_model1 = cvCreateFGDStatModel(rawImage);
                rawImage1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 3);
                yuvImage = cvCloneImage(rawImage);
                pFrImg = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                pFrImg1 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                pFrImg2 = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                ImaskCodeBookCC = cvCreateImage(cvGetSize(rawImage), IPL_DEPTH_8U, 1);
                imageLen = rawImage->width * rawImage->height;
                cA = new codeBook[imageLen];
                cC = new codeBook[imageLen];
                cD = new codeBook[imageLen];
                for (int f = 0; f < imageLen; f++) {
                    cA[f].numEntries = 0; cA[f].t = 0;
                    cC[f].numEntries = 0; cC[f].t = 0;
                    cD[f].numEntries = 0; cD[f].t = 0;
                }
                for (int nc = 0; nc < nChannels; nc++) {
                    cbBounds[nc] = 10;
                }
                ch[0] = true;
                ch[1] = true;
                ch[2] = true;
            }
            if (rawImage) {
                if (!pause) {
                    cvSmooth(rawImage, rawImage1, CV_GAUSSIAN, 3, 3);
                    cvChangeDetection(temp1, temp2, result1);
                    cvChangeDetection(rawImage1, temp1, result2);
                    cvAnd(result1, result2, result, NULL);
                    cvCopy(temp1, temp2, NULL);
                    cvCopy(rawImage, temp1, NULL);
                    cvUpdateBGStatModel(rawImage, bg_model);
                    cvUpdateBGStatModel(rawImage, bg_model1);
                }
                cvCvtColor(rawImage1, yuvImage, CV_BGR2YCrCb);
                if (!pause && nFrmNum >= 1 && nFrmNum < T) {
                    pColor = (uchar *)((yuvImage)->imageData);
                    for (int c = 0; c < imageLen; c++) {
                        update_codebook_model(pColor, cA[c], cbBounds, nChannels);
                        trainig_codebook(pColor, cC[c], cbBounds, nChannels);
                        pColor += 3;
                    }
                }
                if (nFrmNum == T) {
                    for (c = 0; c < imageLen; c++) {
                        clear_stale_entries(cA[c]);
                        training_clear_stale_entries(cC[c]);
                    }
                }
                if (nFrmNum > T) {
                    pColor = (uchar *)((yuvImage)->imageData);
                    uchar maskPixelCodeBook;
                    uchar maskPixelCodeBook1;
                    uchar maskPixelCodeBook2;
                    uchar *pMask = (uchar *)((pFrImg)->imageData);
                    uchar *pMask1 = (uchar *)((pFrImg1)->imageData);
                    uchar *pMask2 = (uchar *)((pFrImg2)->imageData);
                    for (int c = 0; c < imageLen; c++) {
                        // output of the paper's algorithm WITHOUT automatic background updating
                        maskPixelCodeBook1 = background_Diff(pColor, cA[c], nChannels, minMod, maxMod);
                        *pMask1++ = maskPixelCodeBook1;
                        // output of the paper's algorithm WITH automatic background updating
                        if (!pause && det_update_codebook_cC(pColor, cC[c], cbBounds, nChannels)) {
                            det_update_codebook_cD(pColor, cD[c], cbBounds, nChannels, nFrmNum);
                            realtime_clear_stale_entries_cD(cD[c], nFrmNum);
                            cD_to_cC(cD[c], cC[c], (nFrmNum - T) / 5);
                        }
                        else {
                            realtime_clear_stale_entries_cC(cC[c], nFrmNum);
                        }
                        maskPixelCodeBook2 = background_Diff(pColor, cC[c], nChannels, minMod, maxMod);
                        *pMask2++ = maskPixelCodeBook2;
                        pColor += 3;
                    }
                    cvCopy(pFrImg2, ImaskCodeBookCC);
                    if (!pause) {
                        count_Segmentation(cC, yuvImage, nChannels, minMod, maxMod);
                        forgratio = (float)(fgcount) / imageLen;
                    }
                }
                bg_model1->foreground->origin = 1;
                bg_model->foreground->origin = 1;
                pFrImg->origin = 1;
                pFrImg1->origin = 1;
                pFrImg2->origin = 1;
                ImaskCodeBookCC->origin = 1;
                result->origin = 1;
                //connected_Components(pFrImg1,1,40);
                //connected_Components(pFrImg2,1,40);
                cvShowImage("基于MOG的方法[Chris Stauffer'2001]", bg_model->foreground);
                cvShowImage("原视频序列图像", rawImage);
                cvShowImage("三帧差分", result);
                cvShowImage("不实时更新的Codebook算法[本文]", pFrImg1);
                cvShowImage("实时更新的Codebook算法[本文]", pFrImg2);
                cvShowImage("基于Bayes decision的方法[Liyuan Li'2003]", bg_model1->foreground);
                c = cvWaitKey(1) & 0xFF;
                // End processing on ESC, q or Q
                if (c == 27 || c == 'q' || c == 'Q') break;
                // Else check for user input
                switch (c) {
                case 'h':
                    help();
                    break;
                case 'p':
                    pause ^= 1;
                    break;
                case 's':
                    singlestep = 1;
                    pause = false;
                    break;
                case 'r':
                    pause = false;
                    singlestep = false;
                    break;
                // CODEBOOK PARAMS
                case 'y':
                case '0':
                    ch[0] = 1; ch[1] = 0; ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for (n = 0; n < nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'u':
                case '1':
                    ch[0] = 0; ch[1] = 1; ch[2] = 0;
                    printf("CodeBook YUV Channels active: ");
                    for (n = 0; n < nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'v':
                case '2':
                    ch[0] = 0; ch[1] = 0; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for (n = 0; n < nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'a': // All
                case '3':
                    ch[0] = 1; ch[1] = 1; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for (n = 0; n < nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'b': // both u and v together
                    ch[0] = 0; ch[1] = 1; ch[2] = 1;
                    printf("CodeBook YUV Channels active: ");
                    for (n = 0; n < nChannels; n++) printf("%d, ", ch[n]);
                    printf("\n");
                    break;
                case 'z':
                    printf(" Fadd加1 ");
                    Fadd += 1;
                    printf("Fadd=%.4d\n", Fadd);
                    break;
                case 'x':
                    printf(" Fadd减1 ");
                    Fadd -= 1;
                    printf("Fadd=%.4d\n", Fadd);
                    break;
                case 'n':
                    printf(" Tavgstale加1 ");
                    Tavgstale += 1;
                    printf("Tavgstale=%.4d\n", Tavgstale);
                    break;
                case 'm':
                    printf(" Tavgstale减1 ");
                    Tavgstale -= 1;
                    printf("Tavgstale=%.4d\n", Tavgstale);
                    break;
                case 'i': // modify max classification bounds (max bound goes higher)
                    for (n = 0; n < nChannels; n++) {
                        if (ch[n]) maxMod[n] += 1;
                        printf("%.4d,", maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'o': // modify max classification bounds (max bound goes lower)
                    for (n = 0; n < nChannels; n++) {
                        if (ch[n]) maxMod[n] -= 1;
                        printf("%.4d,", maxMod[n]);
                    }
                    printf(" CodeBook High Side\n");
                    break;
                case 'k': // modify min classification bounds (min bound goes lower)
                    for (n = 0; n < nChannels; n++) {
                        if (ch[n]) minMod[n] += 1;
                        printf("%.4d,", minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                case 'l': // modify min classification bounds (min bound goes higher)
                    for (n = 0; n < nChannels; n++) {
                        if (ch[n]) minMod[n] -= 1;
                        printf("%.4d,", minMod[n]);
                    }
                    printf(" CodeBook Low Side\n");
                    break;
                }
            }
        }
        cvReleaseCapture(&capture);
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model);
        cvReleaseBGStatModel((CvBGStatModel**)&bg_model1);
        cvDestroyWindow("原视频序列图像");
        cvDestroyWindow("不实时更新的Codebook算法[本文]");
        cvDestroyWindow("实时更新的Codebook算法[本文]");
        cvDestroyWindow("基于MOG的方法[Chris Stauffer'2001]");
        cvDestroyWindow("三帧差分");
        cvDestroyWindow("基于Bayes decision的方法[Liyuan Li'2003]");
        cvReleaseImage(&temp1);
        cvReleaseImage(&temp2);
        cvReleaseImage(&result);
        cvReleaseImage(&result1);
        cvReleaseImage(&result2);
        cvReleaseImage(&pFrImg);
        cvReleaseImage(&pFrImg1);
        cvReleaseImage(&pFrImg2);
        if (yuvImage) cvReleaseImage(&yuvImage);
        if (rawImage) cvReleaseImage(&rawImage);
        if (rawImage1) cvReleaseImage(&rawImage1);
        if (ImaskCodeBookCC) cvReleaseImage(&ImaskCodeBookCC);
        delete [] cA;
        delete [] cC;
        delete [] cD;
    }
    else {
        printf("\n\nDarn, Something wrong with the parameters\n\n");
        help();
    }
    return 0;
}

int clear_stale_entries(codeBook &c)
{
    int staleThresh = c.t >> 1;
    int *keep = new int[c.numEntries];
    int keepCnt = 0;
    for (int i = 0; i < c.numEntries; i++) {
        if (c.cb[i]->stale > staleThresh)
            keep[i] = 0;
        else {
            keep[i] = 1;
            keepCnt += 1;
        }
    }
    c.t = 0;
    code_element **foo = new code_element*[keepCnt];
    int k = 0;
    for (int ii = 0; ii < c.numEntries; ii++) {
        if (keep[ii]) {
            foo[k] = c.cb[ii];
            foo[k]->t_last_update = 0;
            k++;
        }
    }
    delete [] keep;
    delete [] c.cb;
    c.cb = foo;
    int numCleared = c.numEntries - keepCnt;
    c.numEntries = keepCnt;
    return(numCleared);
}

uchar background_Diff(uchar *p, codeBook &c, int numChannels, int *minMod, int *maxMod)
{
    int matchChannel;
    int i;
    for (i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (int n = 0; n < numChannels; n++) {
            if ((c.cb[i]->min[n] - minMod[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->max[n] + maxMod[n])) {
                matchChannel++;
            }
            else {
                break;
            }
        }
        if (matchChannel == numChannels) {
            break;
        }
    }
    if (i >= c.numEntries) return(255);
    return(0);
}

int update_codebook_model(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels)
{
    if (c.numEntries == 0) c.t = 0;
    c.t += 1;
    unsigned int high[3], low[3];
    int matchChannel;
    float avg[3];
    for (int i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (int n = 0; n < numChannels; n++) {
            if ((c.cb[i]->learnLow[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->learnHigh[n])) {
                matchChannel++;
            }
        }
        if (matchChannel == numChannels) {
            for (int n = 0; n < numChannels; n++) {
                avg[n] = (c.cb[i]->f * c.cb[i]->avg[n] + *(p+n)) / (c.cb[i]->f + 1);
                c.cb[i]->avg[n] = avg[n];
                if (c.cb[i]->max[n] < *(p+n)) {
                    c.cb[i]->max[n] = *(p+n);
                }
                else if (c.cb[i]->min[n] > *(p+n)) {
                    c.cb[i]->min[n] = *(p+n);
                }
            }
            c.cb[i]->f += 1;
            c.cb[i]->t_last_update = c.t;
            int negRun = c.t - c.cb[i]->t_last_update;
            if (c.cb[i]->stale < negRun) c.cb[i]->stale = negRun;
            break;
        }
    }
    for (int n = 0; n < numChannels; n++) {
        high[n] = *(p+n) + *(cbBounds+n);
        if (high[n] > 255) high[n] = 255;
        low[n] = *(p+n) - *(cbBounds+n);
        if (low[n] < 0) low[n] = 0;
    }
    if (i == c.numEntries) {
        code_element **foo = new code_element*[c.numEntries+1];
        for (int ii = 0; ii < c.numEntries; ii++) {
            foo[ii] = c.cb[ii];
        }
        foo[c.numEntries] = new code_element;
        if (c.numEntries) delete [] c.cb;
        c.cb = foo;
        for (int n = 0; n < numChannels; n++) {
            c.cb[c.numEntries]->avg[n] = *(p+n);
            c.cb[c.numEntries]->max[n] = *(p+n);
            c.cb[c.numEntries]->min[n] = *(p+n);
            c.cb[c.numEntries]->learnHigh[n] = high[n];
            c.cb[c.numEntries]->learnLow[n] = low[n];
        }
        c.cb[c.numEntries]->f = 1;
        c.cb[c.numEntries]->stale = c.t-1;
        c.cb[c.numEntries]->t_first_update = c.t;
        c.cb[c.numEntries]->t_last_update = c.t;
        c.numEntries += 1;
    }
    for (int s = 0; s < c.numEntries; s++) {
        int negRun = c.t - c.cb[s]->t_last_update + c.cb[s]->t_first_update - 1;
        if (c.cb[s]->stale < negRun) c.cb[s]->stale = negRun;
    }
    for (n = 0; n < numChannels; n++) {
        if (c.cb[i]->learnHigh[n] < high[n]) c.cb[i]->learnHigh[n] += 1;
        if (c.cb[i]->learnLow[n] > low[n]) c.cb[i]->learnLow[n] -= 1;
    }
    return(i);
}

int trainig_codebook(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels)
{
    if (c.numEntries == 0) c.t = 0;
    c.t += 1;
    unsigned int high[3], low[3];
    int matchChannel;
    float avg[3];
    for (int i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (int n = 0; n < numChannels; n++) {
            if ((c.cb[i]->learnLow[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->learnHigh[n])) {
                matchChannel++;
            }
        }
        if (matchChannel == numChannels) {
            for (int n = 0; n < numChannels; n++) {
                avg[n] = (c.cb[i]->f * c.cb[i]->avg[n] + *(p+n)) / (c.cb[i]->f + 1);
                c.cb[i]->avg[n] = avg[n];
                if (c.cb[i]->max[n] < *(p+n)) {
                    c.cb[i]->max[n] = *(p+n);
                }
                else if (c.cb[i]->min[n] > *(p+n)) {
                    c.cb[i]->min[n] = *(p+n);
                }
            }
            c.cb[i]->f += 1;
            c.cb[i]->t_last_update = c.t;
            int negRun = c.t - c.cb[i]->t_last_update;
            if (c.cb[i]->stale < negRun) c.cb[i]->stale = negRun;
            if (i != 0) {
                code_element **fo = new code_element*[c.numEntries];
                fo[0] = c.cb[i];
                for (int h = 0; h < i; h++) {
                    fo[h+1] = c.cb[h];
                }
                for (int h = i+1; h < c.numEntries; h++) {
                    fo[h] = c.cb[h];
                }
                if (c.numEntries) delete [] c.cb;
                c.cb = fo;
            }
            break;
        }
    }
    for (int n = 0; n < numChannels; n++) {
        high[n] = *(p+n) + *(cbBounds+n);
        if (high[n] > 255) high[n] = 255;
        low[n] = *(p+n) - *(cbBounds+n);
        if (low[n] < 0) low[n] = 0;
    }
    if (i == c.numEntries) {
        code_element **foo = new code_element*[c.numEntries+1];
        for (int ii = 0; ii < c.numEntries; ii++) {
            foo[ii] = c.cb[ii];
        }
        foo[c.numEntries] = new code_element;
        if (c.numEntries) delete [] c.cb;
        c.cb = foo;
        for (n = 0; n < numChannels; n++) {
            c.cb[c.numEntries]->avg[n] = *(p+n);
            c.cb[c.numEntries]->max[n] = *(p+n);
            c.cb[c.numEntries]->min[n] = *(p+n);
            c.cb[c.numEntries]->learnHigh[n] = high[n];
            c.cb[c.numEntries]->learnLow[n] = low[n];
        }
        c.cb[c.numEntries]->f = 1;
        c.cb[c.numEntries]->stale = c.t-1;
        c.cb[c.numEntries]->t_first_update = c.t;
        c.cb[c.numEntries]->t_last_update = c.t;
        c.numEntries += 1;
    }
    for (int s = 0; s < c.numEntries; s++) {
        int negRun = c.t - c.cb[s]->t_last_update + c.cb[s]->t_first_update - 1;
        if (c.cb[s]->stale < negRun) c.cb[s]->stale = negRun;
    }
    for (int n = 0; n < numChannels; n++) {
        if (c.cb[i]->learnHigh[n] < high[n]) c.cb[i]->learnHigh[n] += 1;
        if (c.cb[i]->learnLow[n] > low[n]) c.cb[i]->learnLow[n] -= 1;
    }
    return(i);
}

int training_clear_stale_entries(codeBook &c)
{
    int staleThresh = c.t >> 1;
    int *keep = new int[c.numEntries];
    int keepCnt = 0;
    for (int i = 0; i < c.numEntries; i++) {
        if (c.cb[i]->stale > staleThresh)
            keep[i] = 0;
        else {
            keep[i] = 1;
            keepCnt += 1;
        }
    }
    code_element **foo = new code_element*[keepCnt];
    int k = 0;
    for (int ii = 0; ii < c.numEntries; ii++) {
        if (keep[ii]) {
            foo[k] = c.cb[ii];
            k++;
        }
    }
    delete [] keep;
    delete [] c.cb;
    c.cb = foo;
    int numCleared = c.numEntries - keepCnt;
    c.numEntries = keepCnt;
    return(numCleared);
}

int det_update_codebook_cC(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels)
{
    c.t += 1;
    int matchChannel;
    float avg[3];
    int learnLow[3], learnHigh[3];
    for (int i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (int n = 0; n < numChannels; n++) {
            if (forgratio >= Tadap_update) {
                learnLow[n] = c.cb[i]->learnLow[n] * (1 - gamma);
                c.cb[i]->learnLow[n] = learnLow[n];
                learnHigh[n] = c.cb[i]->learnHigh[n] * (1 + gamma);
                c.cb[i]->learnHigh[n] = learnHigh[n];
            }
            if ((c.cb[i]->learnLow[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->learnHigh[n])) {
                matchChannel++;
            }
        }
        if (matchChannel == numChannels) {
            if (forgratio >= Tadap_update) {
                for (int n = 0; n < numChannels; n++) {
                    avg[n] = (1 - beta) * c.cb[i]->avg[n] + *(p+n) * beta;
                    c.cb[i]->avg[n] = avg[n];
                    if (c.cb[i]->max[n] < *(p+n)) {
                        c.cb[i]->max[n] = *(p+n);
                    }
                    else if (c.cb[i]->min[n] > *(p+n)) {
                        c.cb[i]->min[n] = *(p+n);
                    }
                }
            }
            else {
                for (int n = 0; n < numChannels; n++) {
                    avg[n] = (c.cb[i]->f * c.cb[i]->avg[n] + *(p+n)) / (c.cb[i]->f + 1);
                    c.cb[i]->avg[n] = avg[n];
                    if (c.cb[i]->max[n] < *(p+n)) {
                        c.cb[i]->max[n] = *(p+n);
                    }
                    else if (c.cb[i]->min[n] > *(p+n)) {
                        c.cb[i]->min[n] = *(p+n);
                    }
                }
            }
            int negRun = c.t - c.cb[i]->t_last_update;
            if (c.cb[i]->stale < negRun) c.cb[i]->stale = negRun;
            c.cb[i]->t_last_update = c.t;
            c.cb[i]->f += 1;
            break;
        }
    }
    if (i == c.numEntries) return (i);
    return(0);
}

int det_update_codebook_cD(uchar* p, codeBook& c, unsigned* cbBounds, int numChannels, int numframe)
{
    if (c.numEntries == 0) c.t = numframe - 1;
    c.t += 1;
    unsigned int high[3], low[3];
    int matchChannel;
    float avg[3];
    int learnLow[3], learnHigh[3];
    for (int i = 0; i < c.numEntries; i++) {
        matchChannel = 0;
        for (int n = 0; n < numChannels; n++) {
            if (forgratio >= Tadap_update) {
                learnLow[n] = c.cb[i]->learnLow[n] * (1 - gamma);
                c.cb[i]->learnLow[n] = learnLow[n];
                learnHigh[n] = c.cb[i]->learnHigh[n] * (1 + gamma);
                c.cb[i]->learnHigh[n] = learnHigh[n];
            }
            if ((c.cb[i]->learnLow[n] <= *(p+n)) && (*(p+n) <= c.cb[i]->learnHigh[n])) {
                matchChannel++;
            }
        }
        if (matchChannel == numChannels) {
            if (forgratio >= Tadap_update) {
                for (int n = 0; n < numChannels; n++) {
                    avg[n] = (1 - beta) * c.cb[i]->avg[n] + *(p+n) * beta;
                    c.cb[i]->avg[n] = avg[n];
                    if (c.cb[i]->max[n] < *(p+n)) {
                        c.cb[i]->max[n] = *(p+n);
                    }
                    else if (c.cb[i]->min[n] > *(p+n)) {
                        c.cb[i]->min[n] = *(p+n);
                    }
                }
            }
            else {
                for (int n = 0; n < numChannels; n++) {
                    avg[n] = (c.cb[i]->f * c.cb[i]->avg[n] + *(p+n)) / (c.cb[i]->f + 1);
                    c.cb[i]->avg[n] = avg[n];
                    if (c.cb[i]->max[n] < *(p+n)) {
                        c.cb[i]->max[n] = *(p+n);
                    }
                    else if (c.cb[i]->min[n] > *(p+n)) {
                        c.cb[i]->min[n] = *(p+n);
                    }
                }
            }
            int negRun = c.t - c.cb[i]->t_last_update;
            if (c.cb[i]->stale < negRun) c.cb[i]->stale = negRun;
            c.cb[i]->f += 1;
            c.cb[i]->t_last_update = c.t;
            break;
        }
    }
    for (int n = 0; n < numChannels; n++) {
        high[n] = *(p+n) + *(cbBounds+n);
        if (high[n] > 255) high[n] = 255;
        low[n] = *(p+n) - *(cbBounds+n);
        if (low[n] < 0) low[n] = 0;
    }
    if (i == c.numEntries) {
        code_element **foo = new code_element*[c.numEntries+1];
        for (int ii = 0; ii < c.numEntries; ii++) {
            foo[ii] = c.cb[ii];
        }
        foo[c.numEntries] = new code_element;
        if (c.numEntries) delete [] c.cb;
        c.cb = foo;
        for (int n = 0; n < numChannels; n++) {
            c.cb[c.numEntries]->avg[n] = *(p+n);
            c.cb[c.numEntries]->max[n] = *(p+n);
            c.cb[c.numEntries]->min[n] = *(p+n);
            c.cb[c.numEntries]->learnHigh[n] = high[n];
            c.cb[c.numEntries]->learnLow[n] = low[n];
        }
        c.cb[c.numEntries]->f = 1;
        c.cb[c.numEntries]->stale = 0;
        c.cb[c.numEntries]->t_first_update = c.t;
        c.cb[c.numEntries]->t_last_update = c.t;
        c.numEntries += 1;
    }
    for (int s = 0; s < c.numEntries; s++) {
        int negRun = c.t - c.cb[s]->t_last_update;
        if (c.cb[s]->stale < negRun) c.cb[s]->stale = negRun;
    }
    for (int n = 0; n < numChannels; n++) {
        if (c.cb[i]->learnHigh[n] < high[n]) c.cb[i]->learnHigh[n] += 1;
        if (c.cb[i]->learnLow[n] > low[n]) c.cb[i]->learnLow[n] -= 1;
    }
    return(i);
}

int realtime_clear_stale_entries_cC(codeBook &c, int FrmNum)
{
    int staleThresh = FrmNum/2;
    int *keep = new int[c.numEntries];
    int keepCnt = 0;
    for (int i = 0; i < c.numEntries; i++) {
        if (c.cb[i]->stale > staleThresh)
            keep[i] = 0;
        else {
            keep[i] = 1;
            keepCnt += 1;
        }
    }
    c.t = 0;
    code_element **foo = new code_element*[keepCnt];
    int k = 0;
    for (int ii = 0; ii < c.numEntries; ii++) {
        if (keep[ii]) {
            foo[k] = c.cb[ii];
            k++;
        }
    }
    delete [] keep;
    delete [] c.cb;
    c.cb = foo;
    int numCleared = c.numEntries - keepCnt;
    c.numEntries = keepCnt;
    return(numCleared);
}

int realtime_clear_stale_entries_cD(codeBook &c, int FrmNum)
{
    int *keep = new int[c.numEntries];
    int keepCnt = 0;
    for (int i = 0; i < c.numEntries; i++) {
        if (c.cb[i]->f <= Fd && c.cb[i]->stale >= Tavgstale_cD)
            keep[i] = 0;
        else {
            keep[i] = 1;
            keepCnt += 1;
        }
    }
    code_element **foo = new code_element*[keepCnt];
    int k = 0;
    for (int ii = 0; ii < c.numEntries; ii++) {
        if (keep[ii]) {
            foo[k] = c.cb[ii];
            k++;
        }
    }
    delete [] keep;
    delete [] c.cb;
    c.cb = foo;
    int numCleared = c.numEntries - keepCnt;
    c.numEntries = keepCnt;
    return(numCleared);
}

int cD_to_cC(codeBook &d, codeBook &c, int FrmNum)
{
    int *keep_d = new int[d.numEntries];
    int keepCnt = 0;
    for (int i = 0; i < d.numEntries; i++) {
        int convertThresh = (FrmNum - T) / d.cb[i]->f;
        if (d.cb[i]->f >= Fadd && convertThresh <= Tavgstale) {
            keep_d[i] = 0;
        }
        else {
            keep_d[i] = 1;
            keepCnt += 1;
        }
    }
    code_element **foo_d = new code_element*[keepCnt];
    int k = 0;
    for (int ii = 0; ii < d.numEntries; ii++) {
        if (keep_d[ii]) {
            foo_d[k] = d.cb[ii];
            k++;
        }
        else {
            code_element **foo_c = new code_element*[c.numEntries+1];
            for (int jj = 0; jj < c.numEntries; jj++) {
                foo_c[jj] = c.cb[jj];
            }
            foo_c[c.numEntries] = new code_element;
            delete [] c.cb;
            c.cb = foo_c;
            c.cb[c.numEntries] = d.cb[ii];
            c.numEntries += 1;
        }
    }
    delete [] keep_d;
    delete [] d.cb;
    d.cb = foo_d;
    int numconverted = d.numEntries - keepCnt;
    d.numEntries = keepCnt;
    return(numconverted);
}
```

Here is the build output:

1>------ 已启动生成: 项目: Realtime_online_cb_det, 配置: Debug x64 ------
1>  Realtime_online_cb_det.cpp
1>Realtime_online_cb_det.cpp(185): error C2065: “CvBGStatModel”: 未声明的标识符
1>Realtime_online_cb_det.cpp(185): error C2065: “bg_model”: 未声明的标识符
1>Realtime_online_cb_det.cpp(186): error C2065: “CvBGStatModel”: 未声明的标识符
1>Realtime_online_cb_det.cpp(186): error C2065: “bg_model1”: 未声明的标识符
1>Realtime_online_cb_det.cpp(271): error C2065: “bg_model”: 未声明的标识符
1>Realtime_online_cb_det.cpp(271): error C3861: “cvCreateGaussianBGModel”: 找不到标识符
1>Realtime_online_cb_det.cpp(272): error C2065: “bg_model1”: 未声明的标识符
1>Realtime_online_cb_det.cpp(272): error C3861: “cvCreateFGDStatModel”: 找不到标识符
1>Realtime_online_cb_det.cpp(308): error C3861: “cvChangeDetection”: 找不到标识符
1>Realtime_online_cb_det.cpp(309): error C3861: “cvChangeDetection”: 找不到标识符
1>Realtime_online_cb_det.cpp(315): error C2065: “bg_model”: 未声明的标识符
1>Realtime_online_cb_det.cpp(315): error C3861: “cvUpdateBGStatModel”: 找不到标识符
1>Realtime_online_cb_det.cpp(316): error C2065: “bg_model1”: 未声明的标识符
1>Realtime_online_cb_det.cpp(316): error C3861: “cvUpdateBGStatModel”: 找不到标识符
1>Realtime_online_cb_det.cpp(381): error C2065: “bg_model1”: 未声明的标识符
1>Realtime_online_cb_det.cpp(381): error C2227: “->foreground”的左边必须指向类/结构/联合/泛型类型
1> 类型是“unknown-type”
1>Realtime_online_cb_det.cpp(381): error C2227: “->origin”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(382): error C2065: “bg_model”: 未声明的标识符
1>Realtime_online_cb_det.cpp(382): error C2227: “->foreground”的左边必须指向类/结构/联合/泛型类型
1> 类型是“unknown-type”
1>Realtime_online_cb_det.cpp(382): error C2227: “->origin”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(391): error C2065: “bg_model”: 未声明的标识符
1>Realtime_online_cb_det.cpp(391): error C2227: “->foreground”的左边必须指向类/结构/联合/泛型类型
1> 类型是“unknown-type”
1>Realtime_online_cb_det.cpp(391): error C2660: “cvShowImage”: 函数不接受 1 个参数
1>Realtime_online_cb_det.cpp(396): error C2065: “bg_model1”: 未声明的标识符
1>Realtime_online_cb_det.cpp(396): error C2227: “->foreground”的左边必须指向类/结构/联合/泛型类型
1> 类型是“unknown-type”
1>Realtime_online_cb_det.cpp(396): error C2660: “cvShowImage”: 函数不接受 1 个参数
1>Realtime_online_cb_det.cpp(529): error C2065: “CvBGStatModel”: 未声明的标识符
1>Realtime_online_cb_det.cpp(529): error C2059: 语法错误:“)”
1>Realtime_online_cb_det.cpp(530): error C2065: “CvBGStatModel”: 未声明的标识符
1>Realtime_online_cb_det.cpp(530): error C2059: 语法错误:“)”
1>Realtime_online_cb_det.cpp(529): error C3861: “cvReleaseBGStatModel”: 找不到标识符
1>Realtime_online_cb_det.cpp(530): error C3861: “cvReleaseBGStatModel”: 找不到标识符
1>Realtime_online_cb_det.cpp(677): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(712): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(714): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(714): error C2227: “->learnHigh”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(714): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(715): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(715): error C2227: “->learnLow”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(715): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(717): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(789): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(799): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(801): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(802): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(803): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(804): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(805): error C2065: “n”: 未声明的标识符
1>Realtime_online_cb_det.cpp(824): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(824): error C2227: “->learnHigh”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(825): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(825): error C2227: “->learnLow”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(827): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(880): warning C4244: “=”: 从“float”转换到“int”,可能丢失数据
1>Realtime_online_cb_det.cpp(882): warning C4244: “=”: 从“float”转换到“int”,可能丢失数据
1>Realtime_online_cb_det.cpp(938): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(962): warning C4244: “=”: 从“float”转换到“int”,可能丢失数据
1>Realtime_online_cb_det.cpp(964): warning C4244: “=”: 从“float”转换到“int”,可能丢失数据
1>Realtime_online_cb_det.cpp(1024): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(1061): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(1061): error C2227: “->learnHigh”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(1062): error C2065: “i”: 未声明的标识符
1>Realtime_online_cb_det.cpp(1062): error C2227: “->learnLow”的左边必须指向类/结构/联合/泛型类型
1>Realtime_online_cb_det.cpp(1064): error C2065: “i”: 未声明的标识符
========== 生成: 成功 0 个,失败 1 个,最新 0 个,跳过 0 个 ==========
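Two separate problems are visible in this build output, and neither is an include-path or .lib configuration issue:

1. The identifiers the compiler cannot find — `CvBGStatModel`, `cvCreateGaussianBGModel`, `cvCreateFGDStatModel`, `cvUpdateBGStatModel`, `cvReleaseBGStatModel`, `cvChangeDetection` — all came from the legacy `cvaux`/`opencv_legacy` module of OpenCV 1.x/2.x (note the commented-out `#include "cvaux.h"` at the top of the file). That module was removed in OpenCV 3.x, so against 3.4.0 these names genuinely do not exist; no project setting will bring them back. Either build against OpenCV 2.4.x, or port those parts to the 3.x C++ API (see the sketch after this list).
2. The `error C2065: "i"/"n": 未声明的标识符` messages are ordinary scoping bugs: the code uses `i` or `n` after a `for (int i = ...; ...)` loop, which ancient MSVC versions tolerated but VS2013 correctly rejects. Declare the index before the loop, e.g. `int i; for (i = 0; i < c.numEntries; i++) { ... } if (i == c.numEntries) ...`.

For the porting route, here is a minimal sketch of how the legacy background models map onto the OpenCV 3.4 C++ API. It assumes OpenCV 3.4 and reuses the video filename from the question; MOG2 is built into the main tree, while close analogues of the other classic models (MOG, GMG) live in the opencv_contrib `bgsegm` module. This only illustrates the replacement — it is not a port of the whole codebook program.

```
// Minimal sketch: replacing the removed legacy C-API background models
// with OpenCV 3.x's built-in MOG2 subtractor. Assumes OpenCV 3.4;
// the video filename is taken from the question.
#include <cstdio>
#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>   // cv::createBackgroundSubtractorMOG2

int main()
{
    cv::VideoCapture capture("intelligentroom_raw.avi");
    if (!capture.isOpened()) {
        fprintf(stderr, "Can not open video file\n");
        return -2;
    }
    // Defaults: history = 500 frames, varThreshold = 16, shadow detection on.
    cv::Ptr<cv::BackgroundSubtractorMOG2> bg = cv::createBackgroundSubtractorMOG2();
    cv::Mat frame, fgMask;
    while (capture.read(frame)) {
        bg->apply(frame, fgMask);   // update the model and get the foreground mask
        cv::imshow("frame", frame);
        cv::imshow("foreground", fgMask);
        int c = cv::waitKey(1) & 0xFF;
        if (c == 27 || c == 'q' || c == 'Q') break;
    }
    return 0;
}
```

The codebook functions themselves (`update_codebook_model`, `background_Diff`, and the rest) do not depend on the removed module: they only need the scoping fixes above plus the `codeBook`/`code_element` definitions from `codeb.h`.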