An analysis of the shmtx code in nginx-0.8.6
Initialization
shmtx is presumably short for "shared memory" plus "mutex"; when multiple worker processes are running, this kind of mutex is used for synchronization between the worker processes.
First, a piece of shared memory is allocated in the master process:
#0 ngx_shm_alloc (shm=0xbfd01a64) at src/os/unix/ngx_shmem.c:16
#1 0x08064619 in ngx_event_module_init (cycle=0x950cbf8) at src/event/ngx_event.c:513
#2 0x0805af4b in ngx_init_cycle (old_cycle=0xbfd01c9c) at src/core/ngx_cycle.c:592
#3 0x0804a492 in main (argc=1, argv=0xbfd01e24) at src/core/nginx.c:317
In ngx_shm_alloc:
shm->addr = (u_char *) mmap(NULL, shm->size,
PROT_READ|PROT_WRITE,
MAP_ANON|MAP_SHARED, -1, 0);
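Memory mapped with MAP_ANON|MAP_SHARED in the master before fork() stays shared with the worker processes created later, which is what makes this single allocation visible to every worker. A minimal standalone sketch of that behaviour (not nginx code; names are illustrative):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    /* one shared counter, visible to both the "master" and the "worker" */
    int  *counter = mmap(NULL, sizeof(int), PROT_READ|PROT_WRITE,
                         MAP_ANON|MAP_SHARED, -1, 0);

    if (counter == MAP_FAILED) {
        return 1;
    }

    *counter = 0;

    if (fork() == 0) {
        *counter = 42;                      /* the child writes through the mapping */
        _exit(0);
    }

    wait(NULL);
    printf("counter = %d\n", *counter);     /* the parent sees 42 */

    munmap(counter, sizeof(int));
    return 0;
}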
In ngx_event_module_init:
if (ngx_shm_alloc(&shm) != NGX_OK) {
return NGX_ERROR;
}
shared = shm.addr;
ngx_accept_mutex_ptr = (ngx_atomic_t *) shared;
if (ngx_shmtx_create(&ngx_accept_mutex, shared, cycle->lock_file.data)
!= NGX_OK)
{
return NGX_ERROR;
}
In ngx_shmtx_create, the shmtx's lock is pointed at the start of the shared memory (the beginning of the shared memory holds ngx_accept_mutex's lock word; the rest holds some statistics counters):
mtx->lock = addr;
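For reference, a rough standalone model of the layout just described (plain C with illustrative sizes and names, following what ngx_event_module_init does with the segment): each shared atomic variable gets its own 128-byte slot so that updates from different workers do not share a cache line.

#include <stdint.h>
#include <stdio.h>

#define CL  128                             /* per-variable padding, as in ngx_event_module_init */

int
main(void)
{
    static _Alignas(CL) unsigned char  shared[3 * CL];    /* stands in for shm.addr */

    /* first slot: the accept mutex's lock word (mtx->lock above) */
    volatile uint32_t  *accept_mutex_lock = (uint32_t *) shared;

    /* second slot: ngx_connection_counter; with NGX_STAT_STUB enabled,
       further statistics counters follow at shared + 2 * CL, ... */
    volatile uint32_t  *connection_counter = (uint32_t *) (shared + CL);

    *accept_mutex_lock = 0;                 /* 0 means unlocked, otherwise the owner's pid */
    *connection_counter = 1;

    printf("lock=%u connections=%u\n", *accept_mutex_lock, *connection_counter);
    return 0;
}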
Handling accept events
ngx_posted_accept_events is an event queue protected by ngx_accept_mutex; before accessing it, ngx_trylock_accept_mutex must be called to acquire the lock:
#0 ngx_shmtx_trylock (mtx=0x80caf24) at src/core/ngx_shmtx.h:34
#1 0x08066622 in ngx_trylock_accept_mutex (cycle=0x9d62bf8) at src/event/ngx_event_accept.c:261
#2 0x08063ec2 in ngx_process_events_and_timers (cycle=0x9d62bf8) at src/event/ngx_event.c:226
#3 0x0806cf75 in ngx_worker_process_cycle (cycle=0x9d62bf8, data=0x0) at src/os/unix/ngx_process_cycle.c:775
#4 0x0806a8c9 in ngx_spawn_process (cycle=0x9d62bf8, proc=0x806ce6a <ngx_worker_process_cycle>, data=0x0, name=0x80ba1b9 "worker process", respawn=-2)
at src/os/unix/ngx_process.c:194
#5 0x0806c509 in ngx_start_worker_processes (cycle=0x9d62bf8, n=2, type=-2) at src/os/unix/ngx_process_cycle.c:331
#6 0x0806be26 in ngx_master_process_cycle (cycle=0x9d62bf8) at src/os/unix/ngx_process_cycle.c:123
#7 0x0804a69c in main (argc=1, argv=0xbffbb8d4) at src/core/nginx.c:382
Then ngx_process_events_and_timers calls ngx_process_events to handle the events, and after processing calls ngx_shmtx_unlock to release the lock.
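Putting the pieces together, here is a standalone model of one iteration of the worker's lock/process/unlock cycle (stand-in function names, not nginx's real loop; the ordering follows ngx_process_events_and_timers):

#include <stdbool.h>
#include <stdio.h>

static bool  accept_mutex_held;

static bool trylock_accept_mutex(void)  { accept_mutex_held = true; return true; }
static void unlock_accept_mutex(void)   { accept_mutex_held = false; }
static void process_events(void)        { puts("epoll_wait, queue the ready events"); }
static void process_accept_events(void) { puts("run posted accept events"); }
static void process_other_events(void)  { puts("run the remaining posted events"); }

int
main(void)
{
    /* one iteration of the worker's event loop */
    if (!trylock_accept_mutex()) {
        return 0;               /* another worker owns the listening fds this round */
    }

    process_events();           /* handlers are only queued while the lock is held */
    process_accept_events();    /* accept new connections first */

    if (accept_mutex_held) {
        unlock_accept_mutex();  /* release the shmtx before the heavier work */
    }

    process_other_events();     /* read/write handlers run without the lock */

    return 0;
}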
ngx_trylock_accept_mutex calls ngx_shmtx_trylock:
return (*mtx->lock == 0 && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid));
ngx_atomic_cmp_set is defined in ngx_gcc_atomic_x86.h:
/*
* "cmpxchgl r, [m]":
*
* if (eax == [m]) {
* zf = 1;
* [m] = r;
* } else {
* zf = 0;
* eax = [m];
* }
*
*
* The "r" means the general register.
* The "=a" and "a" are the %eax register.
* Although we can return result in any register, we use "a" because it is
* used in cmpxchgl anyway. The result is actually in %al but not in %eax,
* however, as the code is inlined gcc can test %al as well as %eax,
* and icc adds "movzbl %al, %eax" by itself.
*
* The "cc" means that flags were changed.
*/
static ngx_inline ngx_atomic_uint_t
ngx_atomic_cmp_set(ngx_atomic_t *lock, ngx_atomic_uint_t old,
ngx_atomic_uint_t set)
{
u_char res;
__asm__ volatile (
NGX_SMP_LOCK
" cmpxchgl %3, %1; "
" sete %0; "
: "=a" (res) : "m" (*lock), "a" (old), "r" (set) : "cc", "memory");
return res;
}
Unlocking:
#define ngx_shmtx_unlock(mtx) (void) ngx_atomic_cmp_set((mtx)->lock, ngx_pid, 0)
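Both operations are a compare-and-swap on the shared lock word: trylock swaps 0 for the caller's pid, unlock swaps the caller's pid back to 0. Below is a standalone sketch of the same protocol using C11 atomics instead of nginx's inline assembly (the shmtx_t type and function names are stand-ins):

#include <stdatomic.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef struct {
    _Atomic long  *lock;        /* points into shared memory, like mtx->lock */
} shmtx_t;

static int
shmtx_trylock(shmtx_t *mtx)
{
    long  expected = 0;

    /* same idea as *mtx->lock == 0 && ngx_atomic_cmp_set(mtx->lock, 0, ngx_pid) */
    return atomic_load(mtx->lock) == 0
           && atomic_compare_exchange_strong(mtx->lock, &expected, (long) getpid());
}

static void
shmtx_unlock(shmtx_t *mtx)
{
    long  expected = (long) getpid();

    /* only succeeds if this process is still the owner, like ngx_shmtx_unlock */
    atomic_compare_exchange_strong(mtx->lock, &expected, 0);
}

int
main(void)
{
    shmtx_t  mtx;

    mtx.lock = mmap(NULL, sizeof(_Atomic long), PROT_READ|PROT_WRITE,
                    MAP_ANON|MAP_SHARED, -1, 0);
    if (mtx.lock == MAP_FAILED) {
        return 1;
    }

    *mtx.lock = 0;

    if (shmtx_trylock(&mtx)) {
        printf("pid %d holds the lock\n", (int) getpid());
        shmtx_unlock(&mtx);
    }

    munmap((void *) mtx.lock, sizeof(_Atomic long));
    return 0;
}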
Handling other events
ngx_posted_events is another event queue; it holds all events other than accept events. This queue is protected by the lock ngx_posted_events_mutex, which is not a shmtx but a pthread mutex, used for synchronization between threads. If NGX_THREADS is not configured, ngx_mutex_trylock, ngx_mutex_lock, and ngx_mutex_unlock are all defined as no-ops; see src/os/unix/ngx_thread.h. If NGX_THREADS is configured, every thread appends pending events to the ngx_posted_events queue, so this mutex is needed for mutual exclusion.
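A minimal standalone sketch of what the NGX_THREADS case has to guard: several threads appending to one posted-events list, serialized by a pthread mutex (the list type and names are stand-ins, not nginx's ngx_event_t queue; compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct posted_event_s {
    int                     id;
    struct posted_event_s  *next;
} posted_event_t;

static posted_event_t   *posted_events;             /* head of the queue */
static pthread_mutex_t   posted_events_mutex = PTHREAD_MUTEX_INITIALIZER;

static void
post_event(int id)
{
    posted_event_t  *ev = malloc(sizeof(posted_event_t));

    ev->id = id;

    /* corresponds to the lock/unlock around ngx_post_event */
    pthread_mutex_lock(&posted_events_mutex);
    ev->next = posted_events;
    posted_events = ev;
    pthread_mutex_unlock(&posted_events_mutex);
}

static void *
thread_func(void *arg)
{
    post_event((int) (long) arg);
    return NULL;
}

int
main(void)
{
    pthread_t        t1, t2;
    posted_event_t  *ev;

    pthread_create(&t1, NULL, thread_func, (void *) 1L);
    pthread_create(&t2, NULL, thread_func, (void *) 2L);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);

    for (ev = posted_events; ev; ev = ev->next) {
        printf("posted event %d\n", ev->id);
    }

    return 0;
}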
Questions
1. Why do the accept operations of the different processes need to be serialized with a mutex?
2. Why is no locking/unlocking needed when taking events off the ngx_posted_events queue? See src/event/ngx_event_posted.h: ngx_post_event locks and unlocks, but ngx_delete_posted_event does not. My guess is that only one thread ever calls ngx_delete_posted_event.
A source-level walkthrough of how nginx-0.8.6 handles a request for index.html
Initialization
In the master process, the listening fds are created according to the configuration file, calling bind and listen:
#0 ngx_open_listening_sockets (cycle=0x9674bf8) at src/core/ngx_connection.c:360
#1 0x08056493 in ngx_init_cycle (old_cycle=0xbfdccd70) at src/core/ngx_cycle.c:569
#2 0x0804b424 in main (argc=1, argv=0xbfdccef4) at src/core/nginx.c:317
s = ngx_socket(ls[i].sockaddr->sa_family, ls[i].type, 0);
...
if (bind(s, ls[i].sockaddr, ls[i].socklen) == -1) {
...
if (listen(s, ls[i].backlog) == -1) {
In the worker process, the read event handler of each listening fd is set to ngx_event_accept:
#0 ngx_event_process_init (cycle=0x8611bf8) at src/event/ngx_event.c:810
#1 0x0806276b in ngx_worker_process_init (cycle=0x8611bf8, priority=<value optimized out>) at src/os/unix/ngx_process_cycle.c:941
#2 0x08062ba3 in ngx_worker_process_cycle (cycle=0x8611bf8, data=0x0) at src/os/unix/ngx_process_cycle.c:699
#3 0x0806148d in ngx_spawn_process (cycle=0x8611bf8, proc=0x8062b8b <ngx_worker_process_cycle>, data=0x0, name=0x8099148 "worker process", respawn=-2)
at src/os/unix/ngx_process.c:194
#4 0x080621ca in ngx_start_worker_processes (cycle=0x8611bf8, n=1, type=-2) at src/os/unix/ngx_process_cycle.c:331
#5 0x080630df in ngx_master_process_cycle (cycle=0x8611bf8) at src/os/unix/ngx_process_cycle.c:123
#6 0x0804b5bc in main (argc=1, argv=0xbf908a24) at src/core/nginx.c:382
ls = cycle->listening.elts;
...
for (i = 0; i < cycle->listening.nelts; i++) {
c = ngx_get_connection(ls[i].fd, cycle->log);
...
rev = c->read;
...
rev->handler = ngx_event_accept;
A client initiates a connection
In the worker process, ngx_event_accept is called to accept the connection and obtain the connection fd:
#0 ngx_event_accept (ev=0x90bec08) at src/event/ngx_event_accept.c:19
#1 0x08063ff5 in ngx_epoll_process_events (cycle=0x9095bf8, timer=4294967295, flags=<value optimized out>) at src/event/modules/ngx_epoll_module.c:518
#2 0x0805ce5a in ngx_process_events_and_timers (cycle=0x9095bf8) at src/event/ngx_event.c:245
#3 0x08062c3c in ngx_worker_process_cycle (cycle=0x9095bf8, data=0x0) at src/os/unix/ngx_process_cycle.c:775
#4 0x0806148d in ngx_spawn_process (cycle=0x9095bf8, proc=0x8062b8b <ngx_worker_process_cycle>, data=0x0, name=0x8099148 "worker process", respawn=-2)
at src/os/unix/ngx_process.c:194
#5 0x080621ca in ngx_start_worker_processes (cycle=0x9095bf8, n=1, type=-2) at src/os/unix/ngx_process_cycle.c:331
#6 0x080630df in ngx_master_process_cycle (cycle=0x9095bf8) at src/os/unix/ngx_process_cycle.c:123
#7 0x0804b5bc in main (argc=1, argv=0xbff988b4) at src/core/nginx.c:382
lc = ev->data;
ls = lc->listening;
s = accept(lc->fd, (struct sockaddr *) sa, &socklen);
...
c = ngx_get_connection(s, ev->log);
...
ls->handler(c);
Then this handler is called:
ngx_http_init_connection (c=0x90a7cb8) at src/http/ngx_http_request.c:179
rev = c->read;
rev->handler = ngx_http_init_request;
Finally, the connection fd's read event handler is set to ngx_http_init_request.
HTTP request/response
Starting from ngx_http_init_request, a suitable handler is found to process the request, and the output is then passed through a series of filters before being sent out:
#0 ngx_linux_sendfile_chain (c=0x8148cb8, in=0x813ced4, limit=0) at src/os/unix/ngx_linux_sendfile_chain.c:56
#1 0x08071a41 in ngx_http_write_filter (r=0x8136220, in=0xbfa834d8) at src/http/ngx_http_write_filter_module.c:238
#2 0x0807e1e9 in ngx_http_chunked_body_filter (r=0x8136220, in=0x0) at src/http/modules/ngx_http_chunked_filter_module.c:84
#3 0x08082ae4 in ngx_http_gzip_body_filter (r=0x8136220, in=0xbfa834d8) at src/http/modules/ngx_http_gzip_filter_module.c:304
#4 0x08083791 in ngx_http_postpone_filter (r=0x8136220, in=0x0) at src/http/ngx_http_postpone_filter_module.c:82
#5 0x08084579 in ngx_http_charset_body_filter (r=0x8136220, in=0xbfa834d8) at src/http/modules/ngx_http_charset_filter_module.c:552
#6 0x08087193 in ngx_http_ssi_body_filter (r=0x8136220, in=0x8136220) at src/http/modules/ngx_http_ssi_filter_module.c:394
#7 0x0804dd0a in ngx_output_chain (ctx=0x813cedc, in=0xbfa834d8) at src/core/ngx_output_chain.c:67
#8 0x08071d1f in ngx_http_copy_filter (r=0x8136220, in=0xbfa834d8) at src/http/ngx_http_copy_filter_module.c:110
#9 0x0807e479 in ngx_http_range_body_filter (r=0x8136220, in=0xbfa834d8) at src/http/modules/ngx_http_range_filter_module.c:555
#10 0x08066fbe in ngx_http_output_filter (r=0x8136220, in=0xbfa834d8) at src/http/ngx_http_core_module.c:1689
#11 0x0807d7ae in ngx_http_static_handler (r=0x8136220) at src/http/modules/ngx_http_static_module.c:258
#12 0x08069ff4 in ngx_http_core_content_phase (r=0x8136220, ph=0x81457c8) at src/http/ngx_http_core_module.c:1261
#13 0x08066c85 in ngx_http_core_run_phases (r=0x8136220) at src/http/ngx_http_core_module.c:796
#14 0x08066dae in ngx_http_handler (r=0x8136220) at src/http/ngx_http_core_module.c:779
#15 0x08069396 in ngx_http_internal_redirect (r=0x8136220, uri=0xbfa83668, args=0x81363b0) at src/http/ngx_http_core_module.c:2182
#16 0x0807e0ca in ngx_http_index_handler (r=0x8136220) at src/http/modules/ngx_http_index_module.c:264
#17 0x08069ff4 in ngx_http_core_content_phase (r=0x8136220, ph=0x81457b0) at src/http/ngx_http_core_module.c:1261
#18 0x08066c85 in ngx_http_core_run_phases (r=0x8136220) at src/http/ngx_http_core_module.c:796
#19 0x08066dae in ngx_http_handler (r=0x8136220) at src/http/ngx_http_core_module.c:779
#20 0x0806e968 in ngx_http_process_request (r=0x8136220) at src/http/ngx_http_request.c:1576
#21 0x0806ef57 in ngx_http_process_request_headers (rev=0x815fc70) at src/http/ngx_http_request.c:1038
#22 0x0806f3db in ngx_http_process_request_line (rev=0x815fc70) at src/http/ngx_http_request.c:848
#23 0x0806d06e in ngx_http_init_request (rev=0x815fc70) at src/http/ngx_http_request.c:508
#24 0x08063ff5 in ngx_epoll_process_events (cycle=0x8136bf8, timer=60000, flags=<value optimized out>) at src/event/modules/ngx_epoll_module.c:518
#25 0x0805ce5a in ngx_process_events_and_timers (cycle=0x8136bf8) at src/event/ngx_event.c:245
#26 0x08062c3c in ngx_worker_process_cycle (cycle=0x8136bf8, data=0x0) at src/os/unix/ngx_process_cycle.c:775
#27 0x0806148d in ngx_spawn_process (cycle=0x8136bf8, proc=0x8062b8b <ngx_worker_process_cycle>, data=0x0, name=0x8099148 "worker process", respawn=-2)
at src/os/unix/ngx_process.c:194
#28 0x080621ca in ngx_start_worker_processes (cycle=0x8136bf8, n=1, type=-2) at src/os/unix/ngx_process_cycle.c:331
#29 0x080630df in ngx_master_process_cycle (cycle=0x8136bf8) at src/os/unix/ngx_process_cycle.c:123
#30 0x0804b5bc in main (argc=1, argv=0xbfa83ba4) at src/core/nginx.c:382
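The backtrace shows the body filter chain at work: each filter module keeps a pointer to the previously installed filter and forwards data to it, so installing the modules builds a chain whose head is ngx_http_top_body_filter. A standalone sketch of that chaining convention (simplified types; write_filter and passthrough_filter are stand-ins for ngx_http_write_filter and an intermediate filter):

#include <stdio.h>

typedef int (*body_filter_pt)(const char *data);

static body_filter_pt  top_body_filter;        /* like ngx_http_top_body_filter */
static body_filter_pt  next_body_filter;       /* like a module's ngx_http_next_body_filter */

/* the final filter that actually writes, like ngx_http_write_filter */
static int
write_filter(const char *data)
{
    printf("write: %s\n", data);
    return 0;
}

/* an intermediate filter that would transform the data, then forwards it */
static int
passthrough_filter(const char *data)
{
    return next_body_filter(data);
}

static void
install_filters(void)
{
    top_body_filter = write_filter;             /* the write filter registers first */

    next_body_filter = top_body_filter;         /* each later module saves the old head */
    top_body_filter = passthrough_filter;       /* and puts itself in front of it */
}

int
main(void)
{
    install_filters();
    top_body_filter("index.html contents");     /* like calling ngx_http_output_filter */
    return 0;
}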
The last step is ngx_linux_sendfile_chain. The chain list passed in has two nodes: one is the buf holding the response header, the other refers to the fd of index.html. In ngx_linux_sendfile_chain they are sent out with writev and sendfile respectively.
(gdb) p *in->buf
$22 = {
pos = 0x813cdec "HTTP/1.1 200 OK\r\nServer: nginx/0.8.6\r\nDate: Mon, 27 Jul 2009 07:16:02 GMT\r\nContent-Type: text/html\r\nContent-Length: 151\r\nLast-Modified: Sun, 26 Jul 2009 13:15:30 GMT\r\nConnection: keep-alive\r\nAccept-Ra"..., last = 0x813cec3 "", file_pos = 0, file_last = 0,
start = 0x813cdec "HTTP/1.1 200 OK\r\nServer: nginx/0.8.6\r\nDate: Mon, 27 Jul 2009 07:16:02 GMT\r\nContent-Type: text/html\r\nContent-Length: 151\r\nLast-Modified: Sun, 26 Jul 2009 13:15:30 GMT\r\nConnection: keep-alive\r\nAccept-Ra"..., end = 0x813ced4 "��\023\b\034�\023\b", tag = 0x0, file = 0x0, shadow = 0x0, temporary = 1, memory = 0,
mmap = 0, recycled = 0, in_file = 0, flush = 0, sync = 0, last_buf = 0, last_in_chain = 0, last_shadow = 0, temp_file = 0, num = 0}
(gdb) p *in->next->buf
$23 = {pos = 0x0, last = 0x0, file_pos = 0, file_last = 151, start = 0x0, end = 0x0, tag = 0x0, file = 0x813cd34, shadow = 0x0, temporary = 0, memory = 0, mmap = 0,
recycled = 0, in_file = 1, flush = 0, sync = 0, last_buf = 1, last_in_chain = 1, last_shadow = 0, temp_file = 0, num = 0}
(gdb) p *in->next->buf.file
$24 = {fd = 12, name = {len = 32, data = 0x813ccc7 "/usr/local/nginx/html/index.html"}, info = {st_dev = 0, __pad1 = 0, __st_ino = 0, st_mode = 0, st_nlink = 0, st_uid = 0,
st_gid = 0, st_rdev = 0, __pad2 = 0, st_size = 0, st_blksize = 0, st_blocks = 0, st_atim = {tv_sec = 0, tv_nsec = 0}, st_mtim = {tv_sec = 0, tv_nsec = 0}, st_ctim = {
tv_sec = 0, tv_nsec = 0}, st_ino = 0}, offset = 0, sys_offset = 0, log = 0x813f948, valid_info = 0, directio = 0}
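A standalone sketch of that final step, assuming a two-node chain like the one dumped above: the in-memory header goes out with writev() and the file-backed buf with sendfile() (send_response and the socketpair are illustrative; error handling is minimal):

#include <fcntl.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>

static int
send_response(int client_fd, const char *header, size_t header_len,
    const char *path)
{
    struct iovec  iov[1];
    struct stat   st;
    off_t         offset = 0;
    int           fd;

    /* node 1: the response header, held in memory (temporary = 1) */
    iov[0].iov_base = (void *) header;
    iov[0].iov_len = header_len;

    if (writev(client_fd, iov, 1) == -1) {
        return -1;
    }

    /* node 2: the file body (in_file = 1), sent from file_pos to file_last */
    fd = open(path, O_RDONLY);
    if (fd == -1) {
        return -1;
    }

    if (fstat(fd, &st) == -1
        || sendfile(client_fd, fd, &offset, st.st_size) == -1)
    {
        close(fd);
        return -1;
    }

    close(fd);
    return 0;
}

int
main(void)
{
    int         sv[2];
    const char  header[] = "HTTP/1.1 200 OK\r\nContent-Length: 151\r\n\r\n";

    /* a connected socket pair stands in for the accepted client connection */
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        return 1;
    }

    return send_response(sv[0], header, sizeof(header) - 1,
                         "/usr/local/nginx/html/index.html") == 0 ? 0 : 1;
}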