Linux epoll_wait monitoring read/write events on a pipe: why do events still occur after the pipe exits?

//Monitor the read end of a pipe with epoll
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/epoll.h>

int main(void)
{
    //the pipe
    int fd[2];
    pipe(fd);
    //create the child process
    pid_t pid = fork();
    if (pid == 0) {
        //child process: writes into the pipe
        close(fd[0]); //close the read end
        char buf[12] = {0};
        buf[10] = '\n';
        char ch = 'A';
        while (1) {
            memset(buf, ch, 10); //fill the string with "AAAAAAAAAA"
            ch++;
            write(fd[1], buf, strlen(buf));
            sleep(3);
            break; //write once, then leave the loop and exit
        }
    } else {
        //parent process: reads from the pipe
        close(fd[1]); //close the write end
        //--create the epoll instance (the "tree root")
        int epfd = epoll_create(1);
        //--register the read end for monitoring
        struct epoll_event ev, epv;
        ev.data.fd = fd[0];
        //ev.events = EPOLLIN | EPOLLET; //edge-triggered mode
        ev.events = EPOLLIN; //level-triggered mode
        epoll_ctl(epfd, EPOLL_CTL_ADD, fd[0], &ev); //add it to the tree
        //int flags = fcntl(fd[0], F_GETFL);
        //flags |= O_NONBLOCK;
        //fcntl(fd[0], F_SETFL, flags); //make the descriptor non-blocking
        while (1) {
            //--wait for an event in a loop
            int ready = epoll_wait(epfd, &epv, 1, -1);
            printf("ready--------===%d\n", ready);
            //--read the data and print it to the screen
            char buf[6];
            memset(buf, 0x00, sizeof(buf));
            int ret = read(epv.data.fd, buf, sizeof(buf) - 1); //leave room for the NUL terminator
            //write(STDOUT_FILENO, buf, ret);
            printf("read:%s\n", buf);
        }
    }
    return 0;
}
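For reference, the behavior this program shows when run: the parent drains the child's 11-byte message in a few short reads, then blocks in epoll_wait until the child exits about three seconds later; from that point on every epoll_wait call returns 1 immediately and read() returns 0, so the loop spins printing empty lines without ever blocking again.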

This uses epoll to monitor communication over the pipe. The child process has exited its loop, yet the parent still keeps receiving events. Why?

2 answers

Are you sure the child process actually exited the loop? Remember that write() is blocking.

qhkdh: Yes, it exited. The parent always has an event it never clears, so it keeps looping. That event is the one produced when the child exits. The parent is reading the pipe; when it is woken by the event it reads no data, the event is never handled, so epoll_wait keeps believing an event is pending and never blocks. Hence the endless loop.
replied almost 3 years ago
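What is happening: when the child exits, the last write end of the pipe is closed. The read end then becomes permanently "ready": epoll raises EPOLLHUP (which is reported even though only EPOLLIN was requested), and read() returns 0 to signal EOF. Under level triggering that condition is re-reported on every epoll_wait call, so the parent has to clear it itself. Below is a minimal sketch of the parent loop with EOF handling; epfd, epv and the small buffer are carried over from the code above.

//Parent event loop with EOF handling (sketch; same setup as above)
while (1) {
    int ready = epoll_wait(epfd, &epv, 1, -1);
    if (ready <= 0)
        continue;

    //Once all write ends are closed, epoll also sets EPOLLHUP in
    //epv.events; read() returns 0 (EOF) after the pipe is drained.
    char buf[6] = {0};
    int ret = read(epv.data.fd, buf, sizeof(buf) - 1);
    if (ret == 0) {
        //EOF: deregister the descriptor so the "ready" condition
        //stops being reported, then leave the loop.
        epoll_ctl(epfd, EPOLL_CTL_DEL, epv.data.fd, NULL);
        close(epv.data.fd);
        break;
    }
    if (ret > 0)
        printf("read:%s\n", buf);
}

Switching to the commented-out EPOLLET line would only mask the symptom: the edge is delivered once, after which epoll_wait blocks forever, so the process hangs instead of spinning.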


其他相关推荐
Linux下epoll并发数量达到1987个后涨不上去

Linux下epoll并发数量达到1987个后涨不上去(达到1987个链接后,无法接受新链接,并非最大开文件句柄限制所导致) 我在linux下写来一个简单的epoll server程序,在局域网中另一台windows计算机采用多线程的形式链接server,但是大概epoll链接了1987个套接字后,再也不能增加新链接了(并非最大文件句柄数量所限制),不清楚所什么原因,跪求解答,谢谢各位好心人。 server代码: #include <unistd.h> #include <sys/types.h> /* basic system data types */ #include <sys/socket.h> /* basic socket definitions */ #include <netinet/in.h> /* sockaddr_in{} and other Internet defns */ #include <arpa/inet.h> /* inet(3) functions */ #include <sys/epoll.h> /* epoll function */ #include <fcntl.h> /* nonblocking */ #include <sys/resource.h> /*setrlimit */ #include <stdlib.h> #include <errno.h> #include <stdio.h> #include <string.h> #define MAXEPOLLSIZE 10000 #define MAXLINE 10240 int handle(int connfd); int setnonblocking(int sockfd) { if (fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFD, 0)|O_NONBLOCK) == -1) { return -1; } return 0; } int main(int argc, char **argv) { int servPort = 6888; int listenq = 1024; int listenfd, connfd, kdpfd, nfds, n, nread, curfds,acceptCount = 0; struct sockaddr_in servaddr, cliaddr; socklen_t socklen = sizeof(struct sockaddr_in); struct epoll_event ev; struct epoll_event events[MAXEPOLLSIZE]; struct rlimit rt; char buf[MAXLINE]; /* 设置每个进程允许打开的最大文件数 */ rt.rlim_max = rt.rlim_cur = MAXEPOLLSIZE; if (setrlimit(RLIMIT_NOFILE, &rt) == -1) { perror("setrlimit error"); return -1; } bzero(&servaddr, sizeof(servaddr)); servaddr.sin_family = AF_INET; servaddr.sin_addr.s_addr = htonl (INADDR_ANY); servaddr.sin_port = htons (servPort); listenfd = socket(AF_INET, SOCK_STREAM, 0); if (listenfd == -1) { perror("can't create socket file"); return -1; } int opt = 1; setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); if (setnonblocking(listenfd) < 0) { perror("setnonblock error"); } if (bind(listenfd, (struct sockaddr *) &servaddr, sizeof(struct sockaddr)) == -1) { perror("bind error"); return -1; } if (listen(listenfd, listenq) == -1) { perror("listen error"); return -1; } /* 创建 epoll 句柄,把监听 socket 加入到 epoll 集合里 */ kdpfd = epoll_create(MAXEPOLLSIZE); ev.events = EPOLLIN | EPOLLET; ev.data.fd = listenfd; if (epoll_ctl(kdpfd, EPOLL_CTL_ADD, listenfd, &ev) < 0) { fprintf(stderr, "epoll set insertion error: fd=%d\n", listenfd); return -1; } curfds = 1; printf("epollserver startup,port %d, max connection is %d, backlog is %d\n", servPort, MAXEPOLLSIZE, listenq); for (;;) { /* 等待有事件发生 */ nfds = epoll_wait(kdpfd, events, curfds, -1); if (nfds == -1) { perror("epoll_wait"); continue; } /* 处理所有事件 */ for (n = 0; n < nfds; ++n) { if (events[n].data.fd == listenfd) { connfd = accept(listenfd, (struct sockaddr *)&cliaddr,&socklen); if (connfd < 0) { perror("accept error"); continue; } sprintf(buf, "accept form %s:%d\n", inet_ntoa(cliaddr.sin_addr), cliaddr.sin_port); printf("%d:%s", ++acceptCount, buf); if (curfds >= MAXEPOLLSIZE) { fprintf(stderr, "too many connection, more than %d\n", MAXEPOLLSIZE); close(connfd); continue; } if (setnonblocking(connfd) < 0) { perror("setnonblocking error"); } ev.events = EPOLLIN | EPOLLET; ev.data.fd = connfd; if (epoll_ctl(kdpfd, EPOLL_CTL_ADD, connfd, &ev) < 0) { fprintf(stderr, "add socket '%d' to epoll failed: %s\n", connfd, strerror(errno)); return -1; } curfds++; printf("%d\n", curfds); continue; } // 处理客户端请求 if (handle(events[n].data.fd) < 0) { epoll_ctl(kdpfd, EPOLL_CTL_DEL, events[n].data.fd,&ev); curfds--; } } } close(listenfd); return 0; } int handle(int connfd) { int nread; char buf[MAXLINE]; nread = read(connfd, buf, MAXLINE);//读取客户端socket流 if (nread == 0) { printf("client close the connection\n"); close(connfd); 
return -1; } if (nread < 0) { perror("read error"); close(connfd); return -1; } write(connfd, buf, nread);//响应客户端 return 0; }

关于epoll返回值的问题

epoll\_wait返回值是就绪描述符的数量n,那么下来应该遍历0~maxevents选出这n个描述符,而我看例子代码中是直接遍历0~n的。请指教

epoll异步服务端程序,客户端采用多线程访问,服务端总是返回errno 9和107

#include <stdio.h> #include <fcntl.h> #include <errno.h> #include <signal.h> #include <unistd.h> #include <string.h> #include <pthread.h> #include <sys/stat.h> #include <sys/epoll.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <sys/resource.h> #include <iostream> using namespace std; #define MAX_EVENTS 65535 #define SERVER_PORT 8887 #define LISTEN_BACKLOG 2048 char send_buf[64] = {0}; int setnonblocking(int& sockfd) { if (fcntl(sockfd, F_SETFL, fcntl(sockfd, F_GETFD, 0)|O_NONBLOCK) == -1) { return -1; } return 0; } void* recv_send_data(void *data) { int _socket = *(int *)data; char recvBuf[16] = {0}; int recv_len = -1; int res = 1; while(res) { recv_len = recv(_socket, recvBuf, 13, 0); if(recv_len < 0) { if(errno == EAGAIN || errno == EINTR || errno == EWOULDBLOCK) { continue; } else { cout << "recv error! errno: " << errno << endl; break; } } else if(recv_len == 0) //对端已正常关闭 { res = 0; } if(recv_len == sizeof(recvBuf)) { res = 1; } else { res = 0; } } if(recv_len > 0) { send(_socket, send_buf, strlen(send_buf), 0); } close(_socket); } int main() { signal(SIGPIPE,SIG_IGN); sprintf(send_buf, "%s", "Hello world!"); struct rlimit rt; rt.rlim_max = rt.rlim_cur = 1048576; int epollFd = epoll_create(MAX_EVENTS); setrlimit(RLIMIT_NOFILE, &rt); /*******创建服务端socket,绑定、监听*******/ struct sockaddr_in server_addr; bzero(&server_addr, sizeof(server_addr)); server_addr.sin_family = AF_INET; server_addr.sin_addr.s_addr = htons(INADDR_ANY); server_addr.sin_port = htons(SERVER_PORT); int server_socket = socket(AF_INET, SOCK_STREAM, 0); if(server_socket < 0) { cout << "create server socket failed!" << endl; return -1; } setnonblocking(server_socket); int opt = 1; setsockopt(server_socket, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); if(bind(server_socket, (struct sockaddr*)&server_addr, sizeof(server_addr))) { cout << "server bind failed!" << endl; return -1; } if(listen(server_socket, LISTEN_BACKLOG)) { cout << "server listen failed" << endl; return -1; } /**************向epollFd添加fd监听事件**************/ struct epoll_event server_ev; server_ev.events = EPOLLIN | EPOLLET; server_ev.data.fd = server_socket; if(-1 == epoll_ctl(epollFd, EPOLL_CTL_ADD, server_socket, &server_ev)) { cout << "epoll_ctl server socket failed" << endl; return -1; } while(true) { struct epoll_event events[MAX_EVENTS]; int nfds = epoll_wait(epollFd, events, MAX_EVENTS, -1); if(nfds < 0) { cout << "epoll_wait failed" << endl; return -1; } for(int i = 0; i < nfds; ++i) { if(events[i].data.fd == server_socket) { struct sockaddr_in clientAddr; socklen_t length = sizeof(clientAddr); int remote_socket = accept(events[i].data.fd, (struct sockaddr*)&clientAddr, &length); if(remote_socket < 0) { cout << "accept socket failed!" << endl; continue; } cout << "socket connect successfully" << endl; setnonblocking(remote_socket); struct epoll_event client_ev; client_ev.data.fd = remote_socket; client_ev.events = EPOLLIN | EPOLLET; if(-1 == epoll_ctl(epollFd, EPOLL_CTL_ADD, remote_socket, &client_ev)) { cout << "epoll_ctl client socket failed" << endl; return -1; } } else { pthread_t thread; pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); if(pthread_create(&thread, &attr, recv_send_data, (void*)&(events[i].data.fd))) { cout << "create thread failed" << endl; return -1; } } } } close(server_socket); cout << "abort" << endl; return 0; }

Linux 网络编程 epoll中的EPOLLIN EPOLLOUT如何触发

代码很长只截取关键部分 //服务器端 #include<sys/socket.h> #include<netinet/in.h> #include<stdio.h> #include<unistd.h> #include<errno.h> #include<string.h> #include<stdlib.h> #include<cassert> #include<sys/epoll.h> #include"locker.h" #include"threadpool.h" #include"http_conn.h" #include"http_conn.cpp" extern void addfd(int epollfd,int fd,bool one_shot); extern void removefd(int epollfd,int fd); #define MAX_FD 65536 #define MAX_EVENT_NUMBER 10000 void addsig(int sig,void(handler)(int),bool restart=true) { struct sigaction sa; memset(&sa,'\0',sizeof(sa)); sa.sa_handler=handler; if(restart) { sa.sa_flags|=SA_RESTART; } sigfillset(&sa.sa_mask); assert(sigaction(sig,&sa,NULL)!=-1); } void show_error(int connfd,const char* info) { printf("%s",info); send(connfd,info,strlen(info),0); close(connfd); } int main(int argc,char* argv[]) { if(argc<2) { printf("usage: %s ip_address port_number\n",basename(argv[0])); return 1; } const char* ip=argv[1]; int port=atoi(argv[2]); addsig(SIGPIPE,SIG_IGN); threadpool<http_conn>* pool=NULL; pool=new threadpool<http_conn>(3); http_conn* users=new http_conn[MAX_FD]; assert(users); int user_count=0; int listenfd=socket(PF_INET,SOCK_STREAM,0); assert(listenfd>=0); struct linger tmp={1,0}; setsockopt(listenfd,SOL_SOCKET,SO_LINGER,&tmp,sizeof(tmp)); int ret=0; struct sockaddr_in address; bzero(&address,sizeof(address)); address.sin_family=AF_INET; inet_pton(AF_INET,ip,&address.sin_addr); address.sin_port=htons(port); ret=bind(listenfd,(struct sockaddr*)&address,sizeof(address)); assert(ret>=0); ret=listen(listenfd,5); assert(ret>=0); epoll_event events[MAX_EVENT_NUMBER]; int epollfd=epoll_create(5); assert(epollfd!=-1); addfd(epollfd,listenfd,false); http_conn::m_epollfd=epollfd; while(1) { int number=epoll_wait(epollfd,events,MAX_EVENT_NUMBER,-1); printf("number is %d\n",number); if((number<0)&&(errno!=EINTR)) { printf("epoll failure\n"); break; } for(int i=0;i<number;i++) { int sockfd=events[i].data.fd; if(sockfd==listenfd) { struct sockaddr_in client_address; socklen_t client_addrlength=sizeof(client_address); int connfd=accept(listenfd,(struct sockaddr*)&client_address,&client_addrlength); if(connfd<0) { printf("errno is: %d\n",errno); continue; } if(http_conn::m_user_count>=MAX_FD) { show_error(connfd,"Internal sever busy"); continue; } printf("running the init(connfd,client_address)"\n); users[connfd].init(connfd,client_address); } else if(events[i].events&(EPOLLRDHUP|EPOLLHUP|EPOLLERR)) { users[sockfd].close_conn(); } else if(events[i].events&EPOLLIN) { if(users[sockfd].read()) { pool->append(users+sockfd); } else { users[sockfd].close_conn(); } } else if(events[i].events&EPOLLOUT) { if(!users[sockfd].write()) { users[sockfd].close_conn(); } } } } close(epollfd); close(listenfd); delete [] users; delete pool; return 0; } 以上是服务器端的主程序 思路是epoll_wait接收到连接就为连接创建一个users存储然后等待后续的操作 但后面EPOLLIN 和EPOLLOUT永远都没法触发 不清楚该怎么触发 另一端写了服务器压力测试程序 和以上代码类似 就是循环创建socket对象然后connect()服务器 但我本意想两端互相发送数据 可connect()后服务器收到创建一个user 继续循环等待 但压力测试程序也在创建完对象后陷入循环等待服务器端的操作 请问该如何触发EPOLLIN和EPOLLOUT信号 以下是压力测试程序关键代码 #include<stdlib.h> #include<stdio.h> #include<assert.h> #include<unistd.h> #include<sys/types.h> #include<sys/epoll.h> #include<fcntl.h> #include<sys/socket.h> #include<netinet/in.h> #include<arpa/inet.h> #include<string.h> static const char* request = "GET http://localhost/index.html HTTP/1.1\r\nConnection: keep-alive\r\n\r\nxxxxxxxxxx"; int setnonblocking(int fd) { int old_option=fcntl(fd,F_GETFL); int new_option=old_option|O_NONBLOCK; fcntl(fd,F_SETFL,new_option); return old_option; } void addfd(int 
epollfd,int fd) { epoll_event event; event.data.fd=fd; event.events=EPOLLIN|EPOLLET|EPOLLRDHUP; epoll_ctl(epollfd,EPOLL_CTL_ADD,fd,&event); setnonblocking(fd); } bool write_nbytes(int sockfd,const char* buffer,int len) { int byters_write=0; printf("write out %d bytes to socket %d\n",len,sockfd); while(1) { bytes_write=send(sockfd,buffer,len,0); if(bytes_write==-1) { return false; } else if(bytes_write==0) { return false; } len-=bytes_write; buffer=buffer+bytes_write; if(len<=0) { return true; } } } bool read_once(int sockfd,char* buffer,int len) { int bytes_read=0; memset(buffer,'\0',len); bytes_read=recv(sockfd,buffer,len,0); if(bytes_read==-1) { return false; } else if(bytes_read==0) { return false; } printf("read in %d bytes from socket %d with content: %s\n",bytes_read,sockfd,buffer); return true; } void start_conn(int epoll_fd,int num,const char* ip,int port) { int ret=0; struct sockaddr_in address; bzero(&address,sizeof(address)); address.sin_family=AF_INET; inet_pton(AF_INET,ip,&address.sin_addr); address.sin_port=htons(port); for(int i=0;i<num;++i) { sllep(1); int sockfd=socket(PF_INET,SOCK_STREAM,0); printf("create 1 sock\n"); if(sockfd<0) { continue; } if(connect(sockfd,(struct sockaddr*)&address,sizeof(address))==0) { printf("build connection %d\n",i); addfd(epoll_fd,sockfd); } } } void close_conn(int epoll_fd,int sockfd) { epoll_ctl(epoll_fd,EPOLL_CTL_DEL,sockfd,0); close(sockfd); } int main(int argc,char* argv[]) { assert(argc==4); int epoll_fd=epoll_create(100); start_conn(epoll_fd,atoi(argv[3]),argv[1],atoi(argv[2])); epoll_event events[10000]; char buffer[2048]; while(1) { int fds=epoll_wait(epoll_fd,events,10000,2000); for(int i=0;i<fds;i++) { int sockfd=events[i].data.fd; if(event[i].events&EPOLLIN) { if(!read_once(sockfd,buffer,2048)); { close_conn(epoll_fd,sockfd); } struct epoll_event event; event.events=EPOLLOUT|EPOLLET|EPOLLERR; event.data.fd=sockfd; epoll_ctl(epoll_fd,EPOLL_CTL_MOD,sockfd,&event); } else if(events[i].events&EPOLLOUT) { if(!write_nbytes(sockfd,request,strlen(request))) { close_conn(epoll_fd,sockfd); } struct epoll_event event; event.events=EPOLLIN|EPOLLET|EPOLLERR; event.data.fd=sockfd; epoll_ctl(epoll_fd,EPOLL_CTL_MOD,sockfd,&event); } else if(events[i].events&EPOLLERR) { close_conn(epoll_fd,sockfd); } } } }

有关epoll的具体应用问题

服务端采用epoll ET模式,当有请求过来,epoll_wait会告知有数据可读,因此进行 读操作(ET下,这里是一次性读完吗?,如果读取数据很大,那岂不是效率低了?), 同时进行数据处理; 我不明白的是,应该在什么地方关注该fd的写事件---即哪里写 epoll_ctl(epfd, EPOLL_OUT, fd, events) ? epoll的事件触发,EPOLL_IN比较懂,有数据传入,epoll_wait告知; 那EPOLL_OUT呢,我明白在发送缓冲区可写时,会触发EPOLL_OUT,但是这怎么跟服务器的设计联系起来呢?

服务器挂载大量close_wait无法断开

使用netty实现websocket,在网页关闭的时候断开连接,采用的是H5版本的,微信浏览器,在ios系统下离开页面的时候会出现原链接状态变为close_wait状态而不会断开,在安卓版本下不会出现这种情况 大概的情况是这样的 ![图片说明](https://img-ask.csdn.net/upload/201809/13/1536839036_917725.jpg) 代码是这样的,hander这边基本没有其他业务的东西,业务方面都放线程池执行了, 抓包情况分析 ![图片说明](https://img-ask.csdn.net/upload/201809/13/1536840275_123782.jpg) 15570->80端口的,80端口是服务器的端口,这里的是客户端向服务端发送fin,服务端立刻返回ack,但是属于服务端的fin却一直不会发送过来,,这个抓包是我在服务器上用tcpdump抓的网卡的数据流量情况,在本地直接用wireshark抓包也是差不多的情况。 而根据服务器的挂载情况显示挂载nginx上面,我直接关闭服务也不会断开连接,只有重新加载nginx配置或者nginx进行重启才会把close_wait状态修改为last_ack状态,。 还有一个情况就是,这个服务器暂时只有我一个人在使用没有对外开放,所以应该也不存在什么阻塞的情况

Linux 环境下 无法加载 libnetty-transport-native-epoll.so

![图片说明](https://img-ask.csdn.net/upload/201905/29/1559095705_825533.png) ``` pom.xml 依赖如下 <dependency> <groupId>io.netty</groupId> <artifactId>netty-transport-native-epoll</artifactId> <version>4.1.23.Final</version> <classifier>linux-x86_64</classifier> </dependency> Epoll.ensureAvailability(); EventLoopGroup group = Epoll.isAvailable() ? new EpollEventLoopGroup() : new NioEventLoopGroup(); b.group(group) .channel(Epoll.isAvailable() ? EpollDatagramChannel.class : NioDatagramChannel.class) .option(ChannelOption.SO_BROADCAST, true) .option(ChannelOption.SO_RCVBUF, 1024 * 1024) .handler(new ChannelInitializer<NioDatagramChannel>() { @Override public void initChannel(final NioDatagramChannel ch) throws Exception { ChannelPipeline p = ch.pipeline(); p.addLast(serverHandler); } }); // linux平台下支持SO_REUSEPORT特性以提高性能 LOGGER.info("Epoll.isAvailable():"+Epoll.isAvailable()); LOGGER.info("Epoll.unavailabilityCause():"+Epoll.unavailabilityCause()); if (Epoll.isAvailable()) { b.option(EpollChannelOption.SO_REUSEPORT, true); } if (Epoll.isAvailable()) { // linux系统下使用SO_REUSEPORT特性,使得多个线程绑定同一个端口 int cpuNum = Runtime.getRuntime().availableProcessors(); LOGGER.info("using epoll reuseport and cpu:" + cpuNum); for (int i = 0; i < cpuNum; i++) { ChannelFuture future = b.bind(port).await(); if (!future.isSuccess()) { throw new Exception("bootstrap bind fail port is " + port); } } } ```

程序偶尔挂起一段时间

<div class="post-text" itemprop="text"> <p>I have written a simple KV database in golang, this database runs smoothly most time, however, it hangs for a few seconds occasionally.</p> <p>The version of golang and linux is:</p> <ol> <li>go version: 1.12.5</li> <li>linux version: CentOS release 6.7 (Final) 2.6.32-573.el6.x86_64</li> </ol> <p>Cpu,Mem,IO info:<a href="https://i.stack.imgur.com/Bq6Bc.png" rel="nofollow noreferrer">CPU</a>, <a href="https://i.stack.imgur.com/lfMiB.png" rel="nofollow noreferrer">MEM</a>, <a href="https://i.stack.imgur.com/c4Yjr.png" rel="nofollow noreferrer">IO</a></p> <p>I've tried some ways to debug this problem:</p> <ol> <li>Import http/pprof package and gops package, however, I fail to get process heap stack because the process is totally unresponsive when it hangs.</li> <li>Strace tool is used to get process syscalls: </li> </ol> <pre class="lang-xml prettyprint-override"><code>process 32573 attached with 178 threads [pid 172460] 11:12:25.260132 futex(0xc1dc4d0f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 24633] 11:12:25.260197 futex(0xc0dc3012c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 34073] 11:12:25.260209 futex(0xc33b8464c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 34071] 11:12:25.260233 futex(0xc3b259c148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 34069] 11:12:25.260264 futex(0xc1f7eef648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33989] 11:12:25.260275 futex(0xc368336f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33988] 11:12:25.260285 futex(0xc36795cbc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33987] 11:12:25.260293 futex(0xc368336bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33986] 11:12:25.260313 futex(0xc36795c848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33984] 11:12:25.260320 futex(0xc36795c4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33983] 11:12:25.260342 futex(0xc3683364c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33982] 11:12:25.260350 futex(0xc36795c148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33980] 11:12:25.260360 futex(0xc0b79d1d48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33978] 11:12:25.260368 futex(0xc0b79d19c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33977] 11:12:25.260376 futex(0xc3720559c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33974] 11:12:25.260385 futex(0xc0b79d1648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33973] 11:12:25.260393 futex(0xc3720552c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 33972] 11:12:25.260402 futex(0xc4959e4bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32787] 11:12:25.260411 futex(0xc000a44f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32786] 11:12:25.260421 futex(0xc0d5df4848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32785] 11:12:25.260431 futex(0xc0d5e5e4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32784] 11:12:25.260440 futex(0xc0d60184c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32782] 11:12:25.260448 futex(0xc0a0eea4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32781] 11:12:25.260456 futex(0xc0d5c2c848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32779] 11:12:25.260465 futex(0xc00068ebc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32777] 11:12:25.260473 futex(0xc0d5df44c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32776] 
11:12:25.260482 futex(0xc0005d3648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32775] 11:12:25.260491 futex(0xc000a059c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32774] 11:12:25.260500 futex(0xc000a772c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32773] 11:12:25.260508 futex(0xc0d5ed64c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32772] 11:12:25.260518 futex(0xc0d5df24c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32770] 11:12:25.260526 futex(0xc0d5d524c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32767] 11:12:25.260534 futex(0xc0a6c66148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32766] 11:12:25.260542 futex(0xc0d5d4e4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32763] 11:12:25.260552 futex(0x1cdada0, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32761] 11:12:25.260561 futex(0xc000b54848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32760] 11:12:25.260569 futex(0xc000b60848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32756] 11:12:25.260578 futex(0xc0005d32c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32755] 11:12:25.260586 futex(0xc000a05648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32754] 11:12:25.260594 futex(0xc0d5ed6148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32753] 11:12:25.260603 futex(0xc000185648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32752] 11:12:25.260611 futex(0xc000a76f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32751] 11:12:25.260620 futex(0xc08a27c148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32750] 11:12:25.260629 futex(0xc0d5e5e148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32748] 11:12:25.260643 futex(0xc075230148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32747] 11:12:25.260652 futex(0xc0d5df4148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32746] 11:12:25.260659 futex(0xc075038148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32745] 11:12:25.260667 futex(0xc0d5df2148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32744] 11:12:25.260675 futex(0xc0004a12c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32743] 11:12:25.260683 futex(0xc073c7e148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32742] 11:12:25.260692 futex(0xc0d5d52148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32741] 11:12:25.260702 futex(0xc0d5d50148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32740] 11:12:25.260711 futex(0xc0d5d4e148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32739] 11:12:25.260718 futex(0xc0d5c2c148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32738] 11:12:25.260726 futex(0xc0d5c2a148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32737] 11:12:25.260734 futex(0xc000a44848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32736] 11:12:25.260742 futex(0xc000b564c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32735] 11:12:25.260750 futex(0xc000b544c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32734] 11:12:25.260758 futex(0xc000b604c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32733] 11:12:25.260766 futex(0xc000b1c4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32732] 11:12:25.260775 futex(0xc0001a4f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32728] 11:12:25.260784 futex(0xc0001852c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 
32727] 11:12:25.260794 restart_syscall(&lt;... resuming interrupted call ...&gt; &lt;unfinished ...&gt; [pid 32725] 11:12:25.260806 futex(0xc00068e4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32724] 11:12:25.260815 futex(0xc000aa9648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32723] 11:12:25.260823 futex(0xc000aa92c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32722] 11:12:25.260833 futex(0xc00083abc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32721] 11:12:25.260842 futex(0xc000868848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32718] 11:12:25.260851 futex(0xc000a76bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32651] 11:12:25.260859 futex(0xc000aa8bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32650] 11:12:25.260867 futex(0xc000aa8848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32649] 11:12:25.260875 futex(0xc0008eb648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32648] 11:12:25.260883 futex(0xc00097dd48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32647] 11:12:25.260891 futex(0xc00097d9c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32646] 11:12:25.260899 futex(0xc00097d648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32644] 11:12:25.260908 futex(0xc0009e1d48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32643] 11:12:25.260918 futex(0xc0008eaf48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32642] 11:12:25.260926 futex(0xc0008eabc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32639] 11:12:25.260935 futex(0xc00097d2c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32638] 11:12:25.260945 futex(0xc00097cf48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32637] 11:12:25.260954 futex(0xc0009e1648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32635] 11:12:25.260963 futex(0xc0008ea4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32634] 11:12:25.260971 futex(0xc00097c848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32633] 11:12:25.260979 futex(0xc0009a6848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32631] 11:12:25.260986 futex(0xc0009e12c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32630] 11:12:25.260994 futex(0xc000aa84c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32629] 11:12:25.261002 futex(0xc000a76848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32628] 11:12:25.261009 futex(0xc000a04bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32627] 11:12:25.261024 futex(0xc000b60148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32626] 11:12:25.261032 futex(0xc000a04848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32625] 11:12:25.261040 futex(0xc000b54148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32624] 11:12:25.261047 futex(0xc000b56148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32623] 11:12:25.261067 futex(0xc0009e0f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32621] 11:12:25.261076 futex(0xc0009e0bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32620] 11:12:25.261083 futex(0xc0009e0848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32619] 11:12:25.261101 futex(0xc00083a848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32618] 11:12:25.261110 futex(0xc0000f59c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32617] 11:12:25.261118 futex(0xc000a044c8, FUTEX_WAIT_PRIVATE, 0, NULL 
&lt;unfinished ...&gt; [pid 32616] 11:12:25.261127 futex(0xc00097c4c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32613] 11:12:25.261136 futex(0xc0005d2bc8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32612] 11:12:25.261144 futex(0xc000184f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32611] 11:12:25.261152 futex(0xc0009e04c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32610] 11:12:25.261160 futex(0xc000a444c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32609] 11:12:25.261169 futex(0xc000a764c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32608] 11:12:25.261178 futex(0xc000aa8148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32606] 11:12:25.261186 futex(0xc000a44148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32604] 11:12:25.261194 futex(0xc0009e0148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32603] 11:12:25.261207 futex(0xc0009a6148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32602] 11:12:25.261216 futex(0xc00097c148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32601] 11:12:25.261224 futex(0xc0008ea148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32600] 11:12:25.261232 futex(0xc000868148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32598] 11:12:25.261240 restart_syscall(&lt;... resuming interrupted call ...&gt; &lt;unfinished ...&gt; [pid 32596] 11:12:25.261249 futex(0xc0000c3648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32595] 11:12:25.261257 futex(0xc0004a0848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32594] 11:12:25.261266 futex(0xc000184848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32593] 11:12:25.261276 futex(0xc0005d2848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32592] 11:12:25.261285 futex(0xc0000f5648, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32590] 11:12:25.261294 futex(0xc0001a4848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32588] 11:12:25.261302 futex(0xc0005d2148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32587] 11:12:25.261310 futex(0xc0000f4f48, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32586] 11:12:25.261318 futex(0xc0001a44c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32585] 11:12:25.261325 futex(0xc000184148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32584] 11:12:25.261334 futex(0xc0004a04c8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32579] 11:12:25.261343 futex(0xc0001a4148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32578] 11:12:25.261350 futex(0x1cdacb8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32577] 11:12:25.261359 futex(0xc0000f4148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32575] 11:12:25.261368 futex(0xc0000c2848, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32574] 11:12:25.261377 restart_syscall(&lt;... resuming interrupted call ...&gt; &lt;unfinished ...&gt; [pid 32573] 11:12:25.261385 futex(0x1cbe8e8, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32574] 11:12:25.264099 &lt;... 
restart_syscall resumed&gt; ) = 0 &lt;0.002704&gt; [pid 32574] 11:12:25.264131 epoll_pwait(4&lt;[eventpoll]&gt;, {}, 128, 0, NULL) = 0 &lt;0.000014&gt; </code></pre> <pre class="lang-xml prettyprint-override"><code>[pid 32574] 11:12:25.264197 nanosleep({0, 10000000}, &lt;unfinished ...&gt; [pid 34072] 11:12:31.236049 read(16699&lt;socket:[371657496]&gt;, &lt;unfinished ...&gt; </code></pre> <pre class="lang-xml prettyprint-override"><code>[pid 34070] 11:12:31.236128 read(16691&lt;socket:[371657488]&gt;, &lt;unfinished ...&gt; [pid 33985] 11:12:31.236142 sched_yield( &lt;unfinished ...&gt; [pid 33981] 11:12:31.236161 futex(0xc06ed9c148, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 33979] 11:12:31.236171 futex(0xc0d5df44c8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 33976] 11:12:31.236178 futex(0xc3720552c8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 33971] 11:12:31.236193 read(7443&lt;socket:[371660645]&gt;, &lt;unfinished ...&gt; [pid 33975] 11:12:31.236243 futex(0xc0004a12c8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32789] 11:12:31.236253 futex(0xc06ed9c148, FUTEX_WAIT_PRIVATE, 0, NULL &lt;unfinished ...&gt; [pid 32788] 11:12:31.236262 futex(0xc000a76bc8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32783] 11:12:31.236270 read(5583&lt;socket:[371824829]&gt;, &lt;unfinished ...&gt; [pid 32780] 11:12:31.236295 read(1258&lt;socket:[371660662]&gt;, &lt;unfinished ...&gt; [pid 32778] 11:12:31.236311 read(2795&lt;socket:[371660629]&gt;, &lt;unfinished ...&gt; [pid 32771] 11:12:31.236339 read(16828&lt;socket:[371657506]&gt;, &lt;unfinished ...&gt; [pid 32769] 11:12:31.236356 read(1163&lt;socket:[371770203]&gt;, &lt;unfinished ...&gt; [pid 32768] 11:12:31.236370 read(16955&lt;socket:[371657515]&gt;, &lt;unfinished ...&gt; [pid 32765] 11:12:31.236385 read(16696&lt;socket:[371657494]&gt;, &lt;unfinished ...&gt; [pid 32764] 11:12:31.236398 futex(0xc1f7eef648, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32762] 11:12:31.236423 read(9849&lt;socket:[371825259]&gt;, &lt;unfinished ...&gt; [pid 32759] 11:12:31.236437 read(16944&lt;socket:[371657509]&gt;, &lt;unfinished ...&gt; [pid 32758] 11:12:31.236451 read(16684&lt;socket:[371657485]&gt;, &lt;unfinished ...&gt; [pid 32757] 11:12:31.236473 read(3335&lt;socket:[371657499]&gt;, &lt;unfinished ...&gt; [pid 32749] 11:12:31.236493 read(16950&lt;socket:[371657513]&gt;, &lt;unfinished ...&gt; [pid 32731] 11:12:31.236509 futex(0xc000868148, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32730] 11:12:31.236518 futex(0xc0008ea4c8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32729] 11:12:31.236526 futex(0xc0000f5648, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32726] 11:12:31.236535 futex(0xc0d5e5e4c8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32720] 11:12:31.236544 read(6925&lt;socket:[371811451]&gt;, &lt;unfinished ...&gt; [pid 32719] 11:12:31.236562 read(16702&lt;socket:[371657497]&gt;, &lt;unfinished ...&gt; [pid 32717] 11:12:31.236576 read(16713&lt;socket:[371657498]&gt;, &lt;unfinished ...&gt; [pid 32645] 11:12:31.236590 read(6490&lt;socket:[371824831]&gt;, &lt;unfinished ...&gt; [pid 32640] 11:12:31.236608 futex(0xc0d5d4e148, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished ...&gt; [pid 32636] 11:12:31.236619 accept4(4105, &lt;unfinished ...&gt; [pid 32632] 11:12:31.236628 read(9867&lt;socket:[371825263]&gt;, &lt;unfinished ...&gt; [pid 32615] 11:12:31.236648 read(2699&lt;socket:[371813455]&gt;, &lt;unfinished ...&gt; [pid 32614] 11:12:31.236665 futex(0xc00068e4c8, FUTEX_WAKE_PRIVATE, 1 &lt;unfinished 
...&gt; </code></pre> <p>GC info(from 11:00 to 11:42):</p> <pre class="lang-xml prettyprint-override"><code>gc 4547 @519870.695s 0%: 0.64+7727+0.098 ms clock, 30+54557/86497/4449+4.7 ms cpu, 30446-&gt;36079-&gt;19800 MB, 33340 MB goal, 48 P GC forced gc 4548 @519998.441s 0%: 1.4+2698+0.028 ms clock, 70+71/32202/90622+1.3 ms cpu, 30366-&gt;30623-&gt;14272 MB, 39601 MB goal, 48 P scvg3465: 921 MB released scvg3465: inuse: 26152, idle: 73618, sys: 99771, released: 52270, consumed: 47501 (MB) GC forced gc 4549 @520121.156s 0%: 0.89+2614+0.029 ms clock, 42+244/31128/89017+1.3 ms cpu, 23255-&gt;23387-&gt;14064 MB, 28545 MB goal, 48 P scvg3466: 35 MB released scvg3466: inuse: 26928, idle: 72843, sys: 99771, released: 51276, consumed: 48494 (MB) GC forced gc 4550 @520243.784s 0%: 0.76+2723+0.036 ms clock, 36+184/32007/89703+1.7 ms cpu, 20919-&gt;21049-&gt;14074 MB, 28129 MB goal, 48 P scvg3467: 17831 MB released scvg3467: inuse: 27049, idle: 72722, sys: 99771, released: 69057, consumed: 30714 (MB) GC forced gc 4551 @520366.531s 0%: 0.78+2736+0.032 ms clock, 37+174/32370/91383+1.5 ms cpu, 20387-&gt;20478-&gt;14120 MB, 28149 MB goal, 48 P scvg3468: 1808 MB released scvg3468: inuse: 28066, idle: 71704, sys: 99771, released: 70795, consumed: 28976 (MB) GC forced gc 4552 @520489.286s 0%: 0.72+2641+0.034 ms clock, 34+258/31377/88676+1.6 ms cpu, 21277-&gt;21360-&gt;14002 MB, 28240 MB goal, 48 P GC forced gc 4553 @520611.942s 0%: 0.41+2648+0.029 ms clock, 20+58/31402/89121+1.4 ms cpu, 21806-&gt;21897-&gt;14362 MB, 28005 MB goal, 48 P scvg3469: 34 MB released scvg3469: inuse: 26701, idle: 73069, sys: 99771, released: 69940, consumed: 29830 (MB) GC forced gc 4554 @520734.608s 0%: 0.55+2685+0.038 ms clock, 26+147/32051/88317+1.8 ms cpu, 21565-&gt;21877-&gt;14352 MB, 28725 MB goal, 48 P scvg3470: 39 MB released scvg3470: inuse: 26641, idle: 73130, sys: 99771, released: 69576, consumed: 30195 (MB) GC forced gc 4555 @520857.307s 0%: 0.43+2614+0.031 ms clock, 20+0.31/31264/91012+1.5 ms cpu, 20760-&gt;20849-&gt;14183 MB, 28704 MB goal, 48 P scvg3471: 394 MB released scvg3471: inuse: 27167, idle: 72604, sys: 99771, released: 69929, consumed: 29841 (MB) GC forced gc 4556 @520979.937s 0%: 0.51+2716+0.026 ms clock, 24+116/32239/91899+1.2 ms cpu, 22069-&gt;22175-&gt;14134 MB, 28366 MB goal, 48 P scvg3472: 720 MB released scvg3472: inuse: 27667, idle: 72104, sys: 99771, released: 69954, consumed: 29817 (MB) GC forced gc 4557 @521102.679s 0%: 0.45+2737+0.051 ms clock, 21+38/32726/89390+2.4 ms cpu, 21606-&gt;21995-&gt;14750 MB, 28269 MB goal, 48 P scvg3473: 25 MB released scvg3473: inuse: 32397, idle: 67369, sys: 99766, released: 66966, consumed: 32800 (MB) GC forced gc 4558 @521225.432s 0%: 6.2+2703+0.037 ms clock, 300+104/32314/90152+1.7 ms cpu, 27118-&gt;27332-&gt;14253 MB, 29500 MB goal, 48 P GC forced gc 4559 @521348.157s 0%: 10+2655+0.028 ms clock, 521+447/31809/89384+1.3 ms cpu, 23382-&gt;23651-&gt;14316 MB, 28507 MB goal, 48 P scvg3474: 51 MB released scvg3474: inuse: 26034, idle: 73736, sys: 99771, released: 66841, consumed: 32930 (MB) GC forced gc 4560 @521470.837s 0%: 9.8+2605+0.026 ms clock, 474+143/30936/90046+1.2 ms cpu, 22733-&gt;22833-&gt;14100 MB, 28632 MB goal, 48 P scvg3475: 79 MB released scvg3475: inuse: 26480, idle: 73290, sys: 99771, released: 66906, consumed: 32864 (MB) GC forced gc 4561 @521593.476s 0%: 0.41+2615+0.046 ms clock, 19+159/30798/88548+2.2 ms cpu, 20629-&gt;20769-&gt;14033 MB, 28201 MB goal, 48 P scvg3476: 2438 MB released scvg3476: inuse: 27789, idle: 71982, sys: 99771, released: 
69305, consumed: 30465 (MB) GC forced gc 4562 @521716.106s 0%: 0.51+2673+0.051 ms clock, 24+88/32054/92490+2.4 ms cpu, 21751-&gt;21896-&gt;14198 MB, 28066 MB goal, 48 P scvg3477: 1294 MB released scvg3477: inuse: 29695, idle: 70076, sys: 99771, released: 69666, consumed: 30104 (MB) GC forced gc 4563 @521838.810s 0%: 0.47+2635+0.028 ms clock, 22+111/31434/90845+1.3 ms cpu, 22623-&gt;22705-&gt;14057 MB, 28397 MB goal, 48 P GC forced gc 4564 @521961.461s 0%: 0.46+2727+0.031 ms clock, 22+155/32528/93412+1.5 ms cpu, 19763-&gt;19851-&gt;14138 MB, 28114 MB goal, 48 P scvg3478: 142 MB released scvg3478: inuse: 25743, idle: 74027, sys: 99771, released: 69729, consumed: 30042 (MB) GC forced gc 4565 @522084.205s 0%: 0.44+2657+0.029 ms clock, 21+214/31701/90716+1.4 ms cpu, 20424-&gt;20514-&gt;14179 MB, 28276 MB goal, 48 P scvg3479: 28 MB released scvg3479: inuse: 28391, idle: 71380, sys: 99771, released: 69228, consumed: 30543 (MB) GC forced gc 4566 @522206.879s 0%: 0.38+2652+0.033 ms clock, 18+108/31342/90698+1.6 ms cpu, 23690-&gt;23781-&gt;14146 MB, 28359 MB goal, 48 P scvg3480: 557 MB released scvg3480: inuse: 27188, idle: 72582, sys: 99771, released: 68737, consumed: 31034 (MB) GC forced gc 4567 @522329.545s 0%: 0.55+2585+0.031 ms clock, 26+47/30887/89360+1.5 ms cpu, 22082-&gt;22182-&gt;14378 MB, 28292 MB goal, 48 P scvg3481: 27 MB released scvg3481: inuse: 28056, idle: 71715, sys: 99771, released: 67790, consumed: 31981 (MB) </code></pre> <p>Vmstat info:</p> <pre class="lang-xml prettyprint-override"><code>procs -----------memory---------- ---swap-- -----io---- --system-- -----cpu----- r b swpd free buff cache si so bi bo in cs us sy id wa st 2 0 2538448 965376 985720 193366176 0 0 7468 4144 30760 37141 3 0 97 0 0 0 0 2538448 866052 985720 193425296 0 0 6096 4796 29720 36705 4 0 95 0 0 3 2 2538448 781104 985720 193466992 0 0 8868 4888 31129 40341 3 0 97 0 0 0 0 2538448 684132 985720 193525088 0 0 6456 7256 27751 34601 4 0 95 0 0 1 0 2538448 591752 985728 193568112 0 0 7588 7448 31251 39921 2 1 97 0 0 0 1 2538596 603712 985724 193519232 0 148 9192 5040 85975 41792 3 1 96 0 0 6 1 2538824 581488 985724 193478368 0 228 12044 8736 76500 44506 3 1 96 0 0 1 0 2539052 588596 985720 193401248 0 228 17488 7708 95530 47809 5 1 94 1 0 0 0 2539228 605584 985584 193341440 0 176 7720 3880 85563 38079 3 1 97 0 0 3 0 2539344 598864 985280 193296800 0 116 9784 4296 48443 41580 3 1 96 0 0 3 0 2540228 4245764 959772 189644656 0 884 7732 4464 594631 41907 3 3 94 0 0 0 0 2540588 5716636 958540 188141504 0 360 12832 35124 240269 37573 3 1 96 0 0 2 0 2540588 5614180 958540 188207216 0 0 13784 2984 27044 34481 2 0 97 0 0 0 0 2540588 5433884 958560 188269600 0 0 9644 538844 51809 47156 3 1 94 2 0 3 0 2540588 5333084 958584 188335344 0 0 15500 552292 53730 74434 2 1 93 4 0 4 0 2540588 5201944 958588 188409344 0 0 20760 5628 31898 40993 3 1 96 0 0 3 0 2540588 5110636 958588 188476512 0 0 17764 8540 29821 38110 3 1 97 0 0 0 0 2540588 4966540 958596 188550064 0 0 18972 4364 29049 35744 3 1 96 0 0 1 0 2540588 4816128 958604 188614416 0 0 14372 3448 29185 36645 3 1 97 0 0 0 0 2540588 4655996 958612 188687328 0 0 20540 16648 29179 36005 3 1 96 0 0 0 0 2540588 4476048 958620 188754208 0 0 16456 4308 29029 35896 3 1 96 0 0 procs -----------memory---------- ---swap-- -----io---- --system-- -----cpu----- r b swpd free buff cache si so bi bo in cs us sy id wa st 1 0 2540588 4333448 958620 188818256 0 0 13888 13564 26797 32078 3 1 97 0 0 0 0 2540588 4117432 958620 188887184 0 0 15688 3948 29182 35576 3 1 96 0 0 6 0 2540588 3933804 
958620 188948192 0 0 9872 1600 26391 31619 3 1 96 0 0 1 0 2540588 3753960 958628 189011008 0 0 11588 4104 32898 39632 3 1 96 0 0 2 0 2540588 3577940 958632 189074000 0 0 12528 4576 28383 36093 2 1 97 0 0 50 0 2540588 3457128 958648 189134080 0 0 8516 182148 67199 31207 47 1 51 1 0 49 0 2540588 3654512 958664 188890368 0 0 8172 42092 77663 13805 91 1 8 0 0 37 11 2540588 3532092 958664 188962656 0 0 23604 7932 67246 12802 92 1 6 1 0 2 0 2540588 3423520 958676 189049424 0 0 41140 36392 49538 38934 30 1 67 3 0 13 0 2540588 3353936 958676 189119248 0 0 14636 3820 31720 37868 6 1 94 0 0 1 0 2540588 3288452 958684 189185456 0 0 14984 10288 27419 29717 5 1 94 0 0 0 0 2540588 3228116 958684 189244896 0 0 8464 3180 27235 33955 2 0 97 0 0 2 0 2540588 3181408 958684 189290576 0 0 11088 2968 27061 32916 3 0 97 0 0 0 0 2540588 3115948 958692 189356912 0 0 13488 22464 30301 38231 2 0 97 0 0 1 0 2540588 3049144 958700 189423712 0 0 13900 4504 30000 37108 3 1 96 0 0 1 0 2540588 2985992 958708 189486880 0 0 12216 10648 28751 34676 2 0 97 0 0 1 0 2540588 2922520 958708 189551104 0 0 13076 3224 28077 34695 3 0 97 0 0 0 1 2540588 2863000 958708 189611968 0 0 6900 2748 27320 34072 3 0 97 0 0 1 0 2540588 2799620 958720 189674800 0 0 13164 18360 29380 36358 3 1 96 0 0 0 0 2540588 2746148 958728 189727936 0 0 12840 4884 29313 37243 2 0 97 0 0 1 0 2540588 2686116 958740 189788928 0 0 8024 33552 28742 34802 3 0 97 0 0 </code></pre> <p>as the second strace info block shown, when the time was between 11:12:25.264197 and 11:12:31.236049, no syscall was invoked, but the client was still writing data to this database.</p> <p>I have no ideas about what the process is doing when the process hangs.</p> <p><strong>Any suggestions to help me investigate this problem?</strong></p> <p><strong>Is there any tools to get process stack when the process hangs?</strong></p> <p>Maybe I need the answer to this question: <a href="https://stackoverflow.com/questions/23354810/how-can-i-dump-all-a-go-processs-stacks-without-killing-it?noredirect=1&amp;lq=1">How can I dump all a Go process's stacks without killing it?</a></p> </div>

Golang可以启动多线程来处理网络IO

<div class="post-text" itemprop="text"> <p>I want to develop a software to handle request from multiple tcp connections using GoLang, and run on the server with 10Gb-nic.</p> <p>It seems that the performance is not enough to recv/send data on the single core. So I want to implement the software to recv/send data on multiple cpu cores.</p> <p>Then I made a simple test server to check whether GoLang can recv/send data on multiple cpu cores or not. It launch multiple(16) goroutines to start http server on the same listener, and use ab(Apache Benchmark) as client.</p> <p>After the server start, I have seen only one thread invoke EpollWait,but the server launched 18 threads, and when I start ab to test using 16 concurrency, but the server occupy only one core.</p> <p>So the question: is there any way to launch multiple threads to handle data recv/send from multiple tcp connections in GoLang. Or Should I have to invoke syscall.EpollWait to make a Network Framework, to do it myself?</p> <p>The server's test code:</p> <pre><code>package main import ( "io" "log" "net" "net/http" "runtime" ) type HandlerFunction struct{} func (self HandlerFunction) ServeHTTP(w http.ResponseWriter, req *http.Request) { data := "Hello" //fmt.Printf("data_len=%d ", len(data)) io.WriteString(w, string(data)) } func RoutineFunction(hs *http.Server, l net.Listener) { runtime.LockOSThread() err := hs.Serve(l) if err != nil { log.Fatalf("serve fail, err=[%s]", err) } } func main() { runtime.GOMAXPROCS(16) l, err := net.Listen("tcp", "0.0.0.0:12345") if err != nil { log.Fatalf("listen fail, err=[%s]", err) } for i := 0; i &lt; 15; i++ { hs := http.Server{} hs.Handler = HandlerFunction{} go RoutineFunction(&amp;hs, l) } hs := http.Server{} hs.Handler = HandlerFunction{} RoutineFunction(&amp;hs, l) } </code></pre> </div>

Linux网络编程 epoll中EPOLLIN EPOLLOUT信号无法触发

比如 if(events[i].events&EPOLLIN) { int bytes_read=read(m_sockfd,read_buf,READ_BUFFER_SIZE); } 这样会触发EPOLLIN 但如果把read()封装到比如service类的sread()函数中 if(events[i].events&EPOLLIN) { service.sread() } 便不会触发EPOLLIN事件 请问问题出在哪 我使用同一个测试程序 第一种写法就可以触发第二种就不行

java nio的select和linux的epoll有什么区别?

#### 最近在看关于epoll和select相关问题,但是并没有发现java的select和linux的epoll有什么区别 #### java的nio select代码如下 ``` import java.io.IOException; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.nio.ByteBuffer; import java.nio.channels.SelectionKey; import java.nio.channels.Selector; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.nio.charset.Charset; import java.util.HashMap; import java.util.Map; import java.util.Set; import java.util.UUID; public class NioServer { private static Map<String, SocketChannel> clientMap = new HashMap<>(); public static void main(String[] args) throws IOException { ServerSocketChannel serverSocketChannel = ServerSocketChannel.open(); serverSocketChannel.configureBlocking(false); ServerSocket serverSocket = serverSocketChannel.socket(); serverSocket.bind(new InetSocketAddress(8899)); Selector selector = Selector.open(); serverSocketChannel.register(selector, SelectionKey.OP_ACCEPT); while (true) { try { /** * 程序会卡在select()函数,当客户端有动作了,比如连接上了,或者是发送消息过来了,服务端才会继续走 * 当第一个客户端连接上并且是selectionKey.isAcceptable(),代码就又重新卡到了select()函数上 * 等待客户端的再次操作(无论是哪个客户端) */ selector.select(); /** * selector.selectedKeys()这段代码可以从中知道是哪个客户端,执行了什么操作 * */ Set<SelectionKey> selectionKeys = selector.selectedKeys(); selectionKeys.forEach((selectionKey) -> { final SocketChannel client; try { if (selectionKey.isAcceptable()) { ServerSocketChannel server = (ServerSocketChannel) selectionKey.channel(); /** * 这代代码获取了真正的客户端socket句柄 */ client = server.accept(); client.configureBlocking(false); /** * 这句话如果不写,就相当于没有注册当消息可读时的回调函数,当客户端发送消息过来的时候 * 服务端的selector.selectedKeys()就永远不会受到这类消息 */ client.register(selector, SelectionKey.OP_READ); String key = "[" + UUID.randomUUID().toString() + "]"; clientMap.put(key, client); } else if (selectionKey.isReadable()) { client = (SocketChannel) selectionKey.channel(); ByteBuffer readBuffer = ByteBuffer.allocate(1024); int count = client.read(readBuffer); if (count > 0) { readBuffer.flip(); Charset charset = Charset.forName("utf-8"); String recvMsg = String.valueOf(charset.decode(readBuffer).array()); System.out.println(client + ":" + recvMsg); String sendKey = null; for (Map.Entry<String, SocketChannel> stringSocketChannelEntry : clientMap.entrySet()) { if (stringSocketChannelEntry.getValue() == client) { sendKey = stringSocketChannelEntry.getKey(); break; } } for (Map.Entry<String, SocketChannel> stringSocketChannelEntry : clientMap.entrySet()) { SocketChannel socketChannel = stringSocketChannelEntry.getValue(); ByteBuffer writeBuffer = ByteBuffer.allocate(1024); writeBuffer.put((sendKey + ": " + recvMsg).getBytes()); writeBuffer.flip(); socketChannel.write(writeBuffer); } } } } catch (Exception ex) { ex.printStackTrace(); } }); selectionKeys.clear(); } catch (Exception e) { e.printStackTrace(); } } } } ``` #### linux的epoll的c代码如下 ``` #include <iostream> #include <sys/socket.h> #include <sys/epoll.h> #include <netinet/in.h> #include <arpa/inet.h> #include <fcntl.h> #include <unistd.h> #include <stdio.h> #include <errno.h> #include <cstring> using namespace std; #define MAXLINE 5 #define OPEN_MAX 100 #define LISTENQ 20 #define SERV_PORT 5000 #define INFTIM 1000 void setnonblocking(int sock) { int opts; opts=fcntl(sock,F_GETFL); if(opts<0) { perror("fcntl(sock,GETFL)"); exit(1); } opts = opts|O_NONBLOCK; if(fcntl(sock,F_SETFL,opts)<0) { perror("fcntl(sock,SETFL,opts)"); exit(1); } } int main(int argc, char* argv[]) { int i, maxi, listenfd, connfd, sockfd,epfd,nfds, portnumber; ssize_t n; char line[MAXLINE]; socklen_t 
clilen; if ( 2 == argc ) { if( (portnumber = atoi(argv[1])) < 0 ) { fprintf(stderr,"Usage:%s portnumber/a/n",argv[0]); return 1; } } else { fprintf(stderr,"Usage:%s portnumber/a/n",argv[0]); return 1; } //声明epoll_event结构体的变量,ev用于注册事件,数组用于回传要处理的事件 struct epoll_event ev,events[20]; //生成用于处理accept的epoll专用的文件描述符 //创建一个epoll文件描述符 epfd=epoll_create(256); struct sockaddr_in clientaddr; struct sockaddr_in serveraddr; listenfd = socket(AF_INET, SOCK_STREAM, 0); //把socket设置为非阻塞方式 //setnonblocking(listenfd); //设置与要处理的事件相关的文件描述符 ev.data.fd=listenfd; //设置要处理的事件类型 ev.events=EPOLLIN|EPOLLET; //ev.events=EPOLLIN; //注册epoll事件,将socket文件描述符listenfd的ev事件注册到epoll上 epoll_ctl(epfd,EPOLL_CTL_ADD,listenfd,&ev); memset(&serveraddr, sizeof(serveraddr) ,0); serveraddr.sin_family = AF_INET; char *local_addr="127.0.0.1"; inet_aton(local_addr,&(serveraddr.sin_addr));//htons(portnumber); serveraddr.sin_port=htons(portnumber); //先bind再监听 bind(listenfd,(sockaddr *)&serveraddr, sizeof(serveraddr)); listen(listenfd, LISTENQ); maxi = 0; for ( ; ; ) { //等待epoll事件的发生 //param epfd表示将监听epfd的事件 //param events表示容器,一旦有事件发生,events数组会被填充 nfds=epoll_wait(epfd,events,20,500); //处理所发生的所有事件 for(i=0;i<nfds;++i) { if(events[i].data.fd==listenfd)//如果新监测到一个SOCKET用户连接到了绑定的SOCKET端口,建立新的连接。 { connfd = accept(listenfd,(sockaddr *)&clientaddr, &clilen); if(connfd<0){ perror("connfd<0"); exit(1); } //setnonblocking(connfd); char *str = inet_ntoa(clientaddr.sin_addr); cout << "accapt a connection from " << str << endl; //设置用于读操作的文件描述符 ev.data.fd=connfd; //设置用于注测的读操作事件 ev.events=EPOLLIN|EPOLLET; //ev.events=EPOLLIN; //注册ev epoll_ctl(epfd,EPOLL_CTL_ADD,connfd,&ev); } else if(events[i].events&EPOLLIN)//如果是已经连接的用户,并且收到数据,那么进行读入。 { cout << "EPOLLIN" << endl; if ( (sockfd = events[i].data.fd) < 0) continue; if ( (n = read(sockfd, line, MAXLINE)) < 0) { if (errno == ECONNRESET) { close(sockfd); events[i].data.fd = -1; } else std::cout<<"readline error"<<std::endl; } else if (n == 0) { close(sockfd); events[i].data.fd = -1; } line[n] = '/0'; cout << "read " << line << endl; //设置用于写操作的文件描述符 ev.data.fd=sockfd; //设置用于注测的写操作事件 ev.events=EPOLLOUT|EPOLLET; //修改sockfd上要处理的事件为EPOLLOUT //epoll_ctl(epfd,EPOLL_CTL_MOD,sockfd,&ev); } else if(events[i].events&EPOLLOUT) // 如果有数据发送 { sockfd = events[i].data.fd; write(sockfd, line, n); //设置用于读操作的文件描述符 ev.data.fd=sockfd; //设置用于注测的读操作事件 ev.events=EPOLLIN|EPOLLET; //修改sockfd上要处理的事件为EPOLIN epoll_ctl(epfd,EPOLL_CTL_MOD,sockfd,&ev); } } } return 0; } ``` #### 从上面两段代码看好像基本思想都是一样的,并没有传说中的select不知道发生的事件,只能通过循环去判断的情况。 #### 但是后来我又在网上找了一段linux的select实现的网络io代码 ``` #include<stdio.h> #include<sys/types.h> #include<sys/socket.h> #include<unistd.h> #include<stdlib.h> #include<errno.h> #include<arpa/inet.h> #include<netinet/in.h> #include<string.h> #include<signal.h> #define MAXLINE 1024 #define LISTENLEN 10 #define SERV_PORT 6666 int main(int argc, char **argv) { int i, maxi, maxfd, listenfd, connfd, sockfd; int nready, client[FD_SETSIZE]; ssize_t n; fd_set rset, allset; char buf[MAXLINE]; socklen_t clilen; struct sockaddr_in cliaddr, servaddr; listenfd = socket(AF_INET, SOCK_STREAM, 0); bzero(&servaddr, sizeof(servaddr)); servaddr.sin_family = AF_INET; servaddr.sin_addr.s_addr = htonl(INADDR_ANY); servaddr.sin_port = htons(SERV_PORT); bind(listenfd, (struct sockaddr*) &servaddr, sizeof(servaddr)); listen(listenfd, LISTENLEN); maxfd = listenfd; /* initialize */ maxi = -1; /* index into client[] array */ for (i = 0; i < FD_SETSIZE; i++) client[i] = -1; /* -1 indicates available entry */ FD_ZERO(&allset); FD_SET(listenfd, &allset); for ( ; ; 
) { rset = allset; /* structure assignment */ nready = select(maxfd+1, &rset, NULL, NULL, NULL); if (FD_ISSET(listenfd, &rset)) /* new client connection */ { clilen = sizeof(cliaddr); connfd = accept(listenfd, (struct sockaddr*) &cliaddr, &clilen); #ifdef NOTDEF printf("new client: %s, port %d\n", inet_ntop(AF_INET, &cliaddr.sin_addr, 4, NULL), ntohs(cliaddr.sin_port)); #endif for (i = 0; i < FD_SETSIZE; i++) if (client[i] < 0) { client[i] = connfd; /* save descriptor */ break; } if (i == FD_SETSIZE) { printf("too many clients"); exit(0); } FD_SET(connfd, &allset); /* add new descriptor to set */ if (connfd > maxfd) maxfd = connfd; /* for select */ if (i > maxi) maxi = i; /* max index in client[] array */ if (--nready <= 0) continue; /* no more readable descriptors */ } for (i = 0; i <= maxi; i++) /* check all clients for data */ { if ( (sockfd = client[i]) < 0) continue; if (FD_ISSET(sockfd, &rset)) { if ( (n = read(sockfd, buf, MAXLINE)) == 0)/* connection closed by client */ { close(sockfd); FD_CLR(sockfd, &allset); client[i] = -1; } else write(sockfd, buf, n); if (--nready <= 0) break; /* no more readable descriptors */ } } } } ``` #### 从这段代码里面确实是可以看出是通过循环判断的,我的问题是java的select是不是就是linux的epoll的思想?

关于epoll的问题,发送缓冲区以及接受缓冲区?

本人刚学习epoll,向论坛大神问几个问题 本人理解,ET只有在文件描述符未就绪变为就绪时才会重新通过内核来告知,导致每一次的读取必须将缓冲区内数据读完,即处理完该事件 但是我们自己设定的用户空间缓冲区buf是有大小的,假如小于sockfd通告窗口大小,还是说我们会一般将用户空间的buf大小就写成和通告的窗口大小写成一致,则不存在,一次无法将缓冲区读完的情况发生 以下是tcp回射程序的部分 ``` else if(events[i].events&EPOLLIN) { if((sockfd=events[i].data.fd)<0) continue; while(rs) { if((n=read(sockfd,buf,MAXLINE))<0) { if(errno==ECONNRESET||errno=EAGAIN) { close(sockfd); events[i].data.fd=-1; } else if(n==0) { close(sockfd); events[i].data.fd=-1; } } if(n==sizeof(buf)) rs=1; else rs=0; } ev.data.fd=sockfd; ev.events=EPOLLOUT|EPOLLET; epoll_ctl(efd,EPOLL_CTL_MOD,sockfd,&ev); } ``` 如何一直读sockfd,这是本人自己理解的 epoll但由于一直没有用epoll将该sockfd改成写,无法将buf内的数据写到sockfd,如何改正,或者如何正确的写出epoll函数, 我在网上找了很多种实现epoll的都并没有持续读直到无法读取那一部分的代码。一直不理解epoll如何实现这一过程,同理write过程也有这样一个问题,(若没写完buf内的内容必须持续写),但后面写的部分由于无法读取会覆盖前面写的部分,可能问题有些混乱,谢谢各位大神

使用epoll出现问题,偶然性,有时候有问题,有时候又是正常的。

我用一个线程监听,如果有数据可读,就通知另一个线程去读。下面是监听线程的代码。出现的问题就是三次握手后,服务端就自动发送了一个FIN报文,接着客户端发送数据就会收到RST void ListenThread::run() { int max_epoll=pconf->value("max_connect","1024").toInt(); short listen_port=pconf->value("listen_port","9000").toShort(); int res; struct sockaddr_in server_addr; server_addr.sin_family=AF_INET; server_addr.sin_port=htons(listen_port); server_addr.sin_addr.s_addr=htonl(INADDR_ANY); int listen_socket=socket(AF_INET,SOCK_STREAM,0); // int opts=fcntl(listen_socket,F_GETFL); // fcntl(listen_socket,F_SETFL,opts|O_NONBLOCK); res=::bind(listen_socket,(sockaddr *)&server_addr,sizeof(server_addr)); if(res!=0){ emit listen_error(); return; } res=::listen(listen_socket,max_epoll); if(res!=0){ emit listen_error(); return; } emit listen_success(); struct epoll_event ev,events[64]; int epfd=epoll_create(max_epoll); EpollEventData *eed=new EpollEventData; eed->fd=listen_socket; ev.data.ptr=eed; ev.events=EPOLLIN; epoll_ctl(epfd,EPOLL_CTL_ADD,listen_socket,&ev); while(isRun){ //qDebug()<<"listen......"; int res=epoll_wait(epfd,events,64,100); if(res<0){ emit listen_error(); } for(int i=0;i<res;i++){ if(((EpollEventData *)events[i].data.ptr)->fd==listen_socket){//有新的连接请求 //qDebug()<<"new connection in"; sockaddr_in con_addr; socklen_t addr_len; int con_socket=::accept(listen_socket,(sockaddr *)&con_addr,&addr_len); int opts=fcntl(con_socket,F_GETFL); fcntl(con_socket,F_SETFL,opts|O_NONBLOCK); EpollEventData * eed_con=new EpollEventData; struct epoll_event ev_con; eed_con->fd=con_socket; inet_ntop(AF_INET,&con_addr.sin_addr,eed_con->ip,INET_ADDRSTRLEN); ev_con.data.ptr=eed_con; ev_con.events=EPOLLIN|EPOLLONESHOT|EPOLLET; epoll_ctl(epfd,EPOLL_CTL_ADD,con_socket,&ev_con); //qDebug()<<"new connection add"; } else if(events[i].events & EPOLLIN){//有socket数据可读 //qDebug()<<"can read"; EpollEventData *ed=(EpollEventData *)events[i].data.ptr; pthread_mutex_lock(&mutex_socket); pSocketList->push_back(ed); pthread_cond_signal(&cond_socket); pthread_mutex_unlock(&mutex_socket); //epoll_ctl(epfd,EPOLL_CTL_DEL,ed->fd,&events[i]); } else{//差错处理 } } } delete eed; ::close(listen_socket); ::close(epfd); } ![图片说明](https://img-ask.csdn.net/upload/201509/22/1442929182_831705.png)

What to do when there are far too many sockets in TIME_WAIT?

![screenshot 1](https://img-ask.csdn.net/upload/201703/13/1489395752_556425.png)
![screenshot 2](https://img-ask.csdn.net/upload/201703/13/1489395759_441670.png)
![screenshot 3](https://img-ask.csdn.net/upload/201703/13/1489395767_309535.png)
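No answer was posted, but for context: TIME_WAIT is the normal fate of whichever side closes first, and it lasts 2×MSL by design. If the screenshots show a server that cannot re-bind its port after a restart, `SO_REUSEADDR` is the standard remedy (a minimal sketch below, assuming a conventional `listenfd`); if instead a client is burning ephemeral ports on many short-lived connections, the real fix is connection reuse (keep-alive or pooling) rather than fighting the TCP state machine.

```
#include <sys/socket.h>

/* Let bind() succeed while old sockets on the same port are still in
 * TIME_WAIT -- the usual server-restart remedy. */
static int allow_addr_reuse(int listenfd)
{
    int opt = 1;
    return setsockopt(listenfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
}
```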

Some doubts from using Linux epoll — hoping an expert can point me in the right direction

```
else if (events[i].events & EPOLLIN) {   /* an established connection sent data: read it */
    cout << "EPOLLIN" << endl;
    if ((sockfd = events[i].data.fd) < 0)
        continue;
    if ((n = read(sockfd, line, MAXLINE)) < 0) {
        if (errno == ECONNRESET) {
            close(sockfd);
            events[i].data.fd = -1;
        } else
            std::cout << "readline error" << std::endl;
        continue;                        /* don't fall through with n < 0 */
    } else if (n == 0) {
        close(sockfd);
        events[i].data.fd = -1;
        continue;
    }
    line[n] = '\0';                      /* was '/0', which is not a terminator */
    cout << "read " << line << endl;
    /* set the descriptor for the write operation */
    ev.data.fd = sockfd;
    /* switch the events to watch on sockfd to EPOLLOUT */
    ev.events = EPOLLOUT | EPOLLET;
    epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
else if (events[i].events & EPOLLOUT) {  /* the socket is writable: send the data */
    sockfd = events[i].data.fd;
    write(sockfd, line, n);
    /* switch the events to watch on sockfd back to EPOLLIN */
    ev.data.fd = sockfd;
    ev.events = EPOLLIN | EPOLLET;
    epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
```
In this code, after a message has been received once, the descriptor's watched event is switched to EPOLLOUT. Why do that? What if I don't need to write anything back? Please advise — many thanks!
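On the question itself: the EPOLLIN→EPOLLOUT flip is just how this particular echo example schedules its reply; it is not required. A common alternative is to try `write()` immediately and register EPOLLOUT only when the kernel send buffer is full — a sketch, with `sockfd`/`epfd`/`line`/`n` assumed from the surrounding handler:

```
ssize_t sent = write(sockfd, line, n);
if (sent < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
    sent = 0;                        /* send buffer full: nothing went out */
if (sent >= 0 && sent < (ssize_t)n) {
    /* stash the unsent tail line[sent..n) per connection, then let epoll
     * wake us when the socket becomes writable again */
    struct epoll_event ev;
    ev.data.fd = sockfd;
    ev.events  = EPOLLOUT | EPOLLET;
    epoll_ctl(epfd, EPOLL_CTL_MOD, sockfd, &ev);
}
/* if sent == n there is nothing to flush, and no EPOLLOUT is needed */
```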

ACE build error on Ubuntu

ACE/TAO-5.6.9, gcc 5.4.0, Ubuntu 16.04.2. Running the following commands:
```
$ tar xjvf ACE-5.6.9.tar.bz2
$ cd ACE_wrappers
$ mkdir build
$ cd build
$ ../configure --prefix=/usr/local
$ make
```
fails with this error:
```
../../ace/Dev_Poll_Reactor.cpp: In member function 'int ACE_Dev_Poll_Reactor::dispatch_io_event(ACE_Dev_Poll_Reactor::Token_Guard&)':
../../ace/Dev_Poll_Reactor.cpp:1216:41: error: cannot bind packed field 'pfds->epoll_event::events' to '__uint32_t& {aka unsigned int&}'
   __uint32_t &revents = pfds->events;
                                         ^
Makefile:3306: recipe for target 'libACE_la-Dev_Poll_Reactor.lo' failed
make[3]: *** [libACE_la-Dev_Poll_Reactor.lo] Error 1
make[3]: Leaving directory '/opt/ACE_wrappers/build/ace'
Makefile:5192: recipe for target 'all-recursive' failed
make[2]: *** [all-recursive] Error 1
make[2]: Leaving directory '/opt/ACE_wrappers/build/ace'
Makefile:2506: recipe for target 'all' failed
make[1]: *** [all] Error 2
make[1]: Leaving directory '/opt/ACE_wrappers/build/ace'
Makefile:444: recipe for target 'all-recursive' failed
make: *** [all-recursive] Error 1
```
How can this be resolved?
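For context: `struct epoll_event` is declared packed on Linux, and g++ 4.9 and later refuse to bind a reference to a packed field. One possible local workaround (an assumption on my part, not an official ACE patch — upgrading to a newer ACE release is the cleaner route) is to copy the field by value in `ace/Dev_Poll_Reactor.cpp` and write any modification back:

```
// before (rejected by g++ 5):
//   __uint32_t &revents = pfds->events;

// after:
__uint32_t revents = pfds->events;   // copy the packed field by value
// ... the existing code that inspects/modifies revents ...
pfds->events = revents;              // propagate changes back, if any
```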

How do I adopt the epoll model on Linux? That is, what exactly do I have to change before this model is actually in use?

Everything I find online explains how the model works internally and mentions that it has three parameters, but not how you actually use the thing. I have a game project with two kinds of servers, front-end and back-end (Java WebSocket), and nginx load-balancing in between. What do I have to fiddle with to get the epoll model in play? Do I add some code to some kernel file, or configure something in my own code? Hoping someone can clear this up.
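For what it's worth: epoll is not something you patch into the kernel. It is a syscall family (epoll_create / epoll_ctl / epoll_wait) that server software opts into. nginx on Linux normally picks it automatically, and you can state it explicitly in `nginx.conf` (a minimal sketch below; the connection count is an illustrative value); on the Java side, NIO selectors on modern JDKs already sit on top of epoll on Linux, so the application code needs no change.

```
# nginx.conf -- epoll is usually auto-selected on Linux anyway
events {
    use epoll;
    worker_connections 10240;   # illustrative value
}
```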

Receiving large files with Linux epoll

I've recently been writing a file version-management server. The server runs Linux and uses epoll plus multiple threads. When receiving a large file I find that it gets split into many small packets on the way. Could someone tell me how the server should stitch the file back together?
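This behavior is expected: TCP is a byte stream, so one send can arrive as many small reads, and reassembly is the application's job. A minimal sketch under an assumed length-prefixed protocol (the 8-byte size header, `read_full`, and `recv_file` are all illustrative, not part of the question; byte-order conversion is omitted, and a nonblocking epoll loop would accumulate across events instead of blocking): keep appending chunks in arrival order until the announced byte count has been consumed.

```
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Read exactly len bytes from a (blocking) socket; 0 on success, -1 on EOF/error. */
static int read_full(int fd, void *p, size_t len)
{
    char *cur = p;
    while (len > 0) {
        ssize_t n = read(fd, cur, len);
        if (n <= 0)
            return -1;
        cur += n;
        len -= (size_t)n;
    }
    return 0;
}

/* Receive one file: an 8-byte size header, then exactly that many bytes. */
static int recv_file(int sockfd, FILE *out)
{
    uint64_t total;                      /* sender announces the file size first */
    if (read_full(sockfd, &total, sizeof(total)) < 0)
        return -1;
    char buf[64 * 1024];
    while (total > 0) {
        size_t want = total < sizeof(buf) ? (size_t)total : sizeof(buf);
        ssize_t n = read(sockfd, buf, want);
        if (n <= 0)
            return -1;
        fwrite(buf, 1, (size_t)n, out);  /* chunks arrive in order: just append */
        total -= (uint64_t)n;
    }
    return 0;
}
```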

epoll watching the same HTTP request: sometimes the event shows up, sometimes it doesn't

I wrote a program to watch for users' HTTP requests, but sending requests shows the pattern described in the title: the first time a given request is sent, epoll reports no readable event on the watched socket. Send the same request a second time and epoll does report it readable; the third send again goes undetected, the fourth is detected, and so on.
