# HG changeset patch # User Maxim Dounin # Date 1314894208 -14400 # Node ID 4bd862efb9d6fb8c6dc4c4846a0879d97978ae94 # Parent 83b74d7020ba5d02b51f9e2e162bca943d47c486 Workaround for cpu hog on errors with cached connections. Just doing another connect isn't safe as peer.get() may expect peer.tries to be strictly positive (this is the case e.g. with round robin with multiple upstream servers). Increment peer.tries to at least avoid the cpu hog in the round robin balancer (with the patch, an alert will be seen instead). This is not enough to fully address the problem though, hence the TODO. We should be able to inform the balancer that the error wasn't considered fatal and that it may make sense to retry the same peer. diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -2812,6 +2812,10 @@ ngx_http_upstream_next(ngx_http_request_ if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) { status = 0; + /* TODO: inform balancer instead */ + + u->peer.tries++; + } else { switch(ft_type) { # HG changeset patch # User Maxim Dounin # Date 1314894208 -14400 # Node ID b4b33695f121a030ab1ae593241a01bec6dbde86 # Parent 4bd862efb9d6fb8c6dc4c4846a0879d97978ae94 Upstream: separate pool for peer connections. This is required to support persistent https connections, as various SSL structures are allocated from the connection's pool. diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -1136,8 +1136,20 @@ ngx_http_upstream_connect(ngx_http_reque c->sendfile &= r->connection->sendfile; u->output.sendfile = c->sendfile; - c->pool = r->pool; + if (c->pool == NULL) { + + /* we need separate pool here to be able to cache SSL connections */ + + c->pool = ngx_create_pool(128, r->connection->log); + if (c->pool == NULL) { + ngx_http_upstream_finalize_request(r, u, + NGX_HTTP_INTERNAL_SERVER_ERROR); + return; + } + } + c->log = r->connection->log; + c->pool->log = c->log; c->read->log = c->log; c->write->log = c->log; @@ -2890,6 +2902,7 @@ ngx_http_upstream_next(ngx_http_request_ } #endif + ngx_destroy_pool(u->peer.connection->pool); ngx_close_connection(u->peer.connection); } @@ -2984,6 +2997,7 @@ ngx_http_upstream_finalize_request(ngx_h "close http upstream connection: %d", u->peer.connection->fd); + ngx_destroy_pool(u->peer.connection->pool); ngx_close_connection(u->peer.connection); } # HG changeset patch # User Maxim Dounin # Date 1314889887 -14400 # Node ID cae8308da3d56319fa972d2caae0e9d2c8b3d718 # Parent b4b33695f121a030ab1ae593241a01bec6dbde86 Upstream: content_length_n API change. We no longer use r->headers_out.content_length_n as the primary source of the backend's response length. Instead, we parse the response length into u->headers_in.content_length_n and copy it to r->headers_out.content_length_n when needed.
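For third-party protocol handlers the practical consequence of this change is that a process_header() callback should now record the body length in u->headers_in.content_length_n instead of writing r->headers_out.content_length_n directly; the upstream module copies the value into headers_out itself. A minimal sketch of the new convention follows; the function name and the p/len arguments are hypothetical and stand for a length token the handler has already located in u->buffer.

    /*
     * Sketch only: reporting the response length after this API change.
     * "p" and "len" are assumed to delimit a length token found by the
     * (hypothetical) protocol handler while parsing u->buffer.
     */
    static ngx_int_t
    ngx_http_example_set_length(ngx_http_request_t *r, u_char *p, size_t len)
    {
        ngx_http_upstream_t  *u = r->upstream;

        /* previously: r->headers_out.content_length_n = ngx_atoof(p, len); */

        u->headers_in.content_length_n = ngx_atoof(p, len);

        if (u->headers_in.content_length_n == NGX_ERROR) {
            return NGX_HTTP_UPSTREAM_INVALID_HEADER;
        }

        return NGX_OK;
    }
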
diff --git a/src/http/modules/ngx_http_memcached_module.c b/src/http/modules/ngx_http_memcached_module.c --- a/src/http/modules/ngx_http_memcached_module.c +++ b/src/http/modules/ngx_http_memcached_module.c @@ -344,8 +344,8 @@ found: while (*p && *p++ != CR) { /* void */ } - r->headers_out.content_length_n = ngx_atoof(len, p - len - 1); - if (r->headers_out.content_length_n == -1) { + u->headers_in.content_length_n = ngx_atoof(len, p - len - 1); + if (u->headers_in.content_length_n == -1) { ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, "memcached sent invalid length in response \"%V\" " "for key \"%V\"", diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -72,6 +72,8 @@ static void ngx_http_upstream_finalize_r static ngx_int_t ngx_http_upstream_process_header_line(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); +static ngx_int_t ngx_http_upstream_process_content_length(ngx_http_request_t *r, + ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_process_set_cookie(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t @@ -96,8 +98,6 @@ static ngx_int_t ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_copy_content_type(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); -static ngx_int_t ngx_http_upstream_copy_content_length(ngx_http_request_t *r, - ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_copy_last_modified(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_rewrite_location(ngx_http_request_t *r, @@ -149,9 +149,9 @@ ngx_http_upstream_header_t ngx_http_ups ngx_http_upstream_copy_content_type, 0, 1 }, { ngx_string("Content-Length"), - ngx_http_upstream_process_header_line, + ngx_http_upstream_process_content_length, offsetof(ngx_http_upstream_headers_in_t, content_length), - ngx_http_upstream_copy_content_length, 0, 0 }, + ngx_http_upstream_ignore_header_line, 0, 0 }, { ngx_string("Date"), ngx_http_upstream_process_header_line, @@ -396,6 +396,8 @@ ngx_http_upstream_create(ngx_http_reques r->cache = NULL; #endif + u->headers_in.content_length_n = -1; + return NGX_OK; } @@ -800,6 +802,7 @@ ngx_http_upstream_cache_send(ngx_http_re u->buffer.pos += c->header_start; ngx_memzero(&u->headers_in, sizeof(ngx_http_upstream_headers_in_t)); + u->headers_in.content_length_n = -1; if (ngx_list_init(&u->headers_in.headers, r->pool, 8, sizeof(ngx_table_elt_t)) @@ -1295,6 +1298,7 @@ ngx_http_upstream_reinit(ngx_http_reques } ngx_memzero(&u->headers_in, sizeof(ngx_http_upstream_headers_in_t)); + u->headers_in.content_length_n = -1; if (ngx_list_init(&u->headers_in.headers, r->pool, 8, sizeof(ngx_table_elt_t)) @@ -1936,10 +1940,10 @@ ngx_http_upstream_process_headers(ngx_ht r->headers_out.status = u->headers_in.status_n; r->headers_out.status_line = u->headers_in.status_line; - u->headers_in.content_length_n = r->headers_out.content_length_n; - - if (r->headers_out.content_length_n != -1) { - u->length = (size_t) r->headers_out.content_length_n; + r->headers_out.content_length_n = u->headers_in.content_length_n; + + if (u->headers_in.content_length_n != -1) { + u->length = (size_t) u->headers_in.content_length_n; } else { u->length = NGX_MAX_SIZE_T_VALUE; @@ -3078,6 +3082,21 @@ ngx_http_upstream_ignore_header_line(ngx static ngx_int_t +ngx_http_upstream_process_content_length(ngx_http_request_t *r, + ngx_table_elt_t *h, ngx_uint_t 
offset) +{ + ngx_http_upstream_t *u; + + u = r->upstream; + + u->headers_in.content_length = h; + u->headers_in.content_length_n = ngx_atoof(h->value.data, h->value.len); + + return NGX_OK; +} + + +static ngx_int_t ngx_http_upstream_process_set_cookie(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset) { @@ -3446,26 +3465,6 @@ ngx_http_upstream_copy_content_type(ngx_ static ngx_int_t -ngx_http_upstream_copy_content_length(ngx_http_request_t *r, ngx_table_elt_t *h, - ngx_uint_t offset) -{ - ngx_table_elt_t *ho; - - ho = ngx_list_push(&r->headers_out.headers); - if (ho == NULL) { - return NGX_ERROR; - } - - *ho = *h; - - r->headers_out.content_length = ho; - r->headers_out.content_length_n = ngx_atoof(h->value.data, h->value.len); - - return NGX_OK; -} - - -static ngx_int_t ngx_http_upstream_copy_last_modified(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset) { # HG changeset patch # User Maxim Dounin # Date 1314890513 -14400 # Node ID 96af2cd53e48eecd616f766fa03912b8b83ea01c # Parent cae8308da3d56319fa972d2caae0e9d2c8b3d718 Upstream: r->upstream->length type change to off_t. The previous use of size_t may cause weird effects on 32-bit platforms with certain big responses transferred in unbuffered mode. Nuke the "if (size > u->length)" check as it's not useful anyway (preread body data isn't subject to this check) and would now require an additional check for u->length being positive. diff --git a/src/http/modules/ngx_http_memcached_module.c b/src/http/modules/ngx_http_memcached_module.c --- a/src/http/modules/ngx_http_memcached_module.c +++ b/src/http/modules/ngx_http_memcached_module.c @@ -407,7 +407,7 @@ ngx_http_memcached_filter(void *data, ss u = ctx->request->upstream; b = &u->buffer; - if (u->length == ctx->rest) { + if (u->length == (ssize_t) ctx->rest) { if (ngx_strncmp(b->last, ngx_http_memcached_end + NGX_HTTP_MEMCACHED_END - ctx->rest, diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -1942,12 +1942,7 @@ ngx_http_upstream_process_headers(ngx_ht r->headers_out.content_length_n = u->headers_in.content_length_n; - if (u->headers_in.content_length_n != -1) { - u->length = (size_t) u->headers_in.content_length_n; - - } else { - u->length = NGX_MAX_SIZE_T_VALUE; - } + u->length = u->headers_in.content_length_n; return NGX_OK; } @@ -2419,10 +2414,6 @@ ngx_http_upstream_process_non_buffered_r size = b->end - b->last; - if (size > u->length) { - size = u->length; - } - if (size && upstream->read->ready) { n = upstream->recv(upstream, b->last, size); @@ -2519,7 +2510,7 @@ ngx_http_upstream_non_buffered_filter(vo cl->buf->last = b->last; cl->buf->tag = u->output.tag; - if (u->length == NGX_MAX_SIZE_T_VALUE) { + if (u->length == -1) { return NGX_OK; } diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -267,7 +267,7 @@ struct ngx_http_upstream_s { ngx_http_upstream_resolved_t *resolved; ngx_buf_t buffer; - size_t length; + off_t length; ngx_chain_t *out_bufs; ngx_chain_t *busy_bufs; # HG changeset patch # User Maxim Dounin # Date 1297818715 -10800 # Node ID d637b79ad4b9f2ed20e100e93c7f2970d553eaf5 # Parent 96af2cd53e48eecd616f766fa03912b8b83ea01c Upstream: pipe length and input_filter_init in buffered mode. Once ngx_event_pipe() has read more data from the upstream than specified in p->length, it is passed to the input filter even if the buffer isn't full yet.
This allows processing data of known length without relying on connection close to signal the end of the data. By default p->length is set to -1 in the upstream module, i.e. the end of data is indicated by connection close. To set it from per-protocol handlers, the upstream input_filter_init() is now called in buffered mode as well (in addition to unbuffered mode). diff --git a/src/event/ngx_event_pipe.c b/src/event/ngx_event_pipe.c --- a/src/event/ngx_event_pipe.c +++ b/src/event/ngx_event_pipe.c @@ -392,8 +392,31 @@ ngx_event_pipe_read_upstream(ngx_event_p cl->buf->file_last - cl->buf->file_pos); } + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0, + "pipe length: %O", p->length); + #endif + if (p->free_raw_bufs && p->length != -1) { + cl = p->free_raw_bufs; + + if (cl->buf->last - cl->buf->pos >= p->length) { + + /* STUB */ cl->buf->num = p->num++; + + if (p->input_filter(p, cl->buf) == NGX_ERROR) { + return NGX_ABORT; + } + + p->free_raw_bufs = cl->next; + } + } + + if (p->length == 0) { + p->upstream_done = 1; + p->read = 1; + } + if ((p->upstream_eof || p->upstream_error) && p->free_raw_bufs) { /* STUB */ p->free_raw_bufs->buf->num = p->num++; @@ -848,6 +871,12 @@ ngx_event_pipe_copy_input_filter(ngx_eve } p->last_in = &cl->next; + if (p->length == -1) { + return NGX_OK; + } + + p->length -= b->last - b->pos; + return NGX_OK; } diff --git a/src/event/ngx_event_pipe.h b/src/event/ngx_event_pipe.h --- a/src/event/ngx_event_pipe.h +++ b/src/event/ngx_event_pipe.h @@ -65,6 +65,7 @@ struct ngx_event_pipe_s { ssize_t busy_size; off_t read_length; + off_t length; off_t max_temp_file_size; ssize_t temp_file_write_size; diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -2304,6 +2304,15 @@ ngx_http_upstream_send_response(ngx_http p->send_timeout = clcf->send_timeout; p->send_lowat = clcf->send_lowat; + p->length = -1; + + if (u->input_filter_init + && u->input_filter_init(p->input_ctx) != NGX_OK) + { + ngx_http_upstream_finalize_request(r, u, 0); + return; + } + u->read_event_handler = ngx_http_upstream_process_upstream; r->write_event_handler = ngx_http_upstream_process_downstream; # HG changeset patch # User Maxim Dounin # Date 1297818715 -10800 # Node ID 9b5e413d92336e05c81d19be7ba89bac0971e025 # Parent d637b79ad4b9f2ed20e100e93c7f2970d553eaf5 Upstream: keepalive flag. This patch introduces the r->upstream->keepalive flag, which is set by protocol handlers if the connection to the upstream is in a good state and can be kept alive.
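The flag itself carries no policy: a protocol handler only reports that the connection is in a known-good state, and a balancer (such as the keepalive module at the end of this series) decides in its peer.free() hook whether to actually cache the connection. A minimal sketch of the handler side, assuming a non-buffered input filter that tracks the remaining body length in u->length; the module name is hypothetical and the pattern mirrors the memcached and proxy filters that follow.

    /*
     * Sketch: a non-buffered input filter reporting a reusable connection.
     * How u->out_bufs is maintained is elided; only the u->keepalive
     * assignment is the point.
     */
    static ngx_int_t
    ngx_http_example_input_filter(void *data, ssize_t bytes)
    {
        ngx_http_request_t   *r = data;
        ngx_http_upstream_t  *u = r->upstream;

        /* ... append the received bytes to u->out_bufs as usual ... */

        u->length -= bytes;

        if (u->length == 0) {
            /* whole body received, nothing extra pending: safe to reuse */
            u->keepalive = 1;
        }

        return NGX_OK;
    }
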
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -1297,6 +1297,8 @@ ngx_http_upstream_reinit(ngx_http_reques return NGX_ERROR; } + u->keepalive = 0; + ngx_memzero(&u->headers_in, sizeof(ngx_http_upstream_headers_in_t)); u->headers_in.content_length_n = -1; @@ -2006,6 +2008,11 @@ ngx_http_upstream_process_body_in_memory } } + if (u->length == 0) { + ngx_http_upstream_finalize_request(r, u, 0); + return; + } + if (ngx_handle_read_event(rev, 0) != NGX_OK) { ngx_http_upstream_finalize_request(r, u, NGX_ERROR); return; diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -308,6 +308,7 @@ struct ngx_http_upstream_s { #endif unsigned buffering:1; + unsigned keepalive:1; unsigned request_sent:1; unsigned header_sent:1; # HG changeset patch # User Maxim Dounin # Date 1315238484 -14400 # Node ID 27308fbadb5e9bf0277d9f56e20eeba44b648f68 # Parent 9b5e413d92336e05c81d19be7ba89bac0971e025 Keepalive support in memcached. diff --git a/src/http/modules/ngx_http_memcached_module.c b/src/http/modules/ngx_http_memcached_module.c --- a/src/http/modules/ngx_http_memcached_module.c +++ b/src/http/modules/ngx_http_memcached_module.c @@ -366,6 +366,7 @@ found: u->headers_in.status_n = 404; u->state->status = 404; + u->keepalive = 1; return NGX_OK; } @@ -426,6 +427,10 @@ ngx_http_memcached_filter(void *data, ss u->length -= bytes; ctx->rest -= bytes; + if (u->length == 0) { + u->keepalive = 1; + } + return NGX_OK; } @@ -463,6 +468,13 @@ ngx_http_memcached_filter(void *data, ss if (ngx_strncmp(last, ngx_http_memcached_end, b->last - last) != 0) { ngx_log_error(NGX_LOG_ERR, ctx->request->connection->log, 0, "memcached sent invalid trailer"); + + b->last = last; + cl->buf->last = last; + u->length = 0; + ctx->rest = 0; + + return NGX_OK; } ctx->rest -= b->last - last; @@ -470,6 +482,10 @@ ngx_http_memcached_filter(void *data, ss cl->buf->last = last; u->length = ctx->rest; + if (u->length == 0) { + u->keepalive = 1; + } + return NGX_OK; } # HG changeset patch # User Maxim Dounin # Date 1315238484 -14400 # Node ID fc3632209583b20ed2fcede98c8dee0a1ca11d0b # Parent 27308fbadb5e9bf0277d9f56e20eeba44b648f68 Keepalive support in fastcgi. By default follow the old behaviour, i.e. FASTCGI_KEEP_CONN flag isn't set in request and application is responsible for closing connection once request is done. To keep connections alive fastcgi_keep_conn must be activated. 
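Taken together with the upstream keepalive module introduced at the end of this series, a configuration using the new directive might look as follows; the addresses and the connection-cache size are purely illustrative.

    upstream fastcgi_backend {
        server 127.0.0.1:9000;
        keepalive 8;               # idle-connection cache, see the last patch
    }

    server {
        listen 80;

        location ~ \.php$ {
            include           fastcgi_params;
            fastcgi_pass      fastcgi_backend;
            fastcgi_keep_conn on;  # sets the FASTCGI_KEEP_CONN flag in the request
        }
    }
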
diff --git a/src/http/modules/ngx_http_fastcgi_module.c b/src/http/modules/ngx_http_fastcgi_module.c --- a/src/http/modules/ngx_http_fastcgi_module.c +++ b/src/http/modules/ngx_http_fastcgi_module.c @@ -26,6 +26,8 @@ typedef struct { ngx_hash_t headers_hash; ngx_uint_t header_params; + ngx_flag_t keep_conn; + #if (NGX_HTTP_CACHE) ngx_http_complex_value_t cache_key; #endif @@ -77,6 +79,8 @@ typedef struct { #define NGX_HTTP_FASTCGI_RESPONDER 1 +#define NGX_HTTP_FASTCGI_KEEP_CONN 1 + #define NGX_HTTP_FASTCGI_BEGIN_REQUEST 1 #define NGX_HTTP_FASTCGI_ABORT_REQUEST 2 #define NGX_HTTP_FASTCGI_END_REQUEST 3 @@ -130,6 +134,7 @@ static ngx_int_t ngx_http_fastcgi_create static ngx_int_t ngx_http_fastcgi_create_request(ngx_http_request_t *r); static ngx_int_t ngx_http_fastcgi_reinit_request(ngx_http_request_t *r); static ngx_int_t ngx_http_fastcgi_process_header(ngx_http_request_t *r); +static ngx_int_t ngx_http_fastcgi_input_filter_init(void *data); static ngx_int_t ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf); static ngx_int_t ngx_http_fastcgi_process_record(ngx_http_request_t *r, @@ -437,6 +442,13 @@ static ngx_command_t ngx_http_fastcgi_c offsetof(ngx_http_fastcgi_loc_conf_t, catch_stderr), NULL }, + { ngx_string("fastcgi_keep_conn"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_FLAG, + ngx_conf_set_flag_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_fastcgi_loc_conf_t, keep_conn), + NULL }, + ngx_null_command }; @@ -600,6 +612,8 @@ ngx_http_fastcgi_handler(ngx_http_reques u->pipe->input_filter = ngx_http_fastcgi_input_filter; u->pipe->input_ctx = r; + u->input_filter_init = ngx_http_fastcgi_input_filter_init; + rc = ngx_http_read_client_request_body(r, ngx_http_upstream_init); if (rc >= NGX_HTTP_SPECIAL_RESPONSE) { @@ -841,6 +855,9 @@ ngx_http_fastcgi_create_request(ngx_http cl->buf = b; + ngx_http_fastcgi_request_start.br.flags = + flcf->keep_conn ? NGX_HTTP_FASTCGI_KEEP_CONN : 0; + ngx_memcpy(b->pos, &ngx_http_fastcgi_request_start, sizeof(ngx_http_fastcgi_request_start_t)); @@ -1574,14 +1591,30 @@ ngx_http_fastcgi_process_header(ngx_http static ngx_int_t +ngx_http_fastcgi_input_filter_init(void *data) +{ + ngx_http_request_t *r = data; + ngx_http_fastcgi_loc_conf_t *flcf; + + flcf = ngx_http_get_module_loc_conf(r, ngx_http_fastcgi_module); + + r->upstream->pipe->length = flcf->keep_conn ? 
+ (off_t) sizeof(ngx_http_fastcgi_header_t) : -1; + + return NGX_OK; +} + + +static ngx_int_t ngx_http_fastcgi_input_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) { - u_char *m, *msg; - ngx_int_t rc; - ngx_buf_t *b, **prev; - ngx_chain_t *cl; - ngx_http_request_t *r; - ngx_http_fastcgi_ctx_t *f; + u_char *m, *msg; + ngx_int_t rc; + ngx_buf_t *b, **prev; + ngx_chain_t *cl; + ngx_http_request_t *r; + ngx_http_fastcgi_ctx_t *f; + ngx_http_fastcgi_loc_conf_t *flcf; if (buf->pos == buf->last) { return NGX_OK; @@ -1589,6 +1622,7 @@ ngx_http_fastcgi_input_filter(ngx_event_ r = p->input_ctx; f = ngx_http_get_module_ctx(r, ngx_http_fastcgi_module); + flcf = ngx_http_get_module_loc_conf(r, ngx_http_fastcgi_module); b = NULL; prev = &buf->shadow; @@ -1611,7 +1645,10 @@ ngx_http_fastcgi_input_filter(ngx_event_ if (f->type == NGX_HTTP_FASTCGI_STDOUT && f->length == 0) { f->state = ngx_http_fastcgi_st_version; - p->upstream_done = 1; + + if (!flcf->keep_conn) { + p->upstream_done = 1; + } ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, "http fastcgi closed stdout"); @@ -1623,6 +1660,10 @@ ngx_http_fastcgi_input_filter(ngx_event_ f->state = ngx_http_fastcgi_st_version; p->upstream_done = 1; + if (flcf->keep_conn) { + r->upstream->keepalive = 1; + } + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0, "http fastcgi sent end request"); @@ -1781,6 +1822,23 @@ ngx_http_fastcgi_input_filter(ngx_event_ } + if (flcf->keep_conn) { + + /* set p->length, minimal amount of data we want to see */ + + if (f->state < ngx_http_fastcgi_st_data) { + p->length = 1; + + } else if (f->state == ngx_http_fastcgi_st_padding) { + p->length = f->padding; + + } else { + /* ngx_http_fastcgi_st_data */ + + p->length = f->length; + } + } + if (b) { b->shadow = buf; b->last_shadow = 1; @@ -2011,6 +2069,8 @@ ngx_http_fastcgi_create_loc_conf(ngx_con conf->catch_stderr = NGX_CONF_UNSET_PTR; + conf->keep_conn = NGX_CONF_UNSET; + ngx_str_set(&conf->upstream.module, "fastcgi"); return conf; @@ -2254,6 +2314,8 @@ ngx_http_fastcgi_merge_loc_conf(ngx_conf ngx_conf_merge_ptr_value(conf->catch_stderr, prev->catch_stderr, NULL); + ngx_conf_merge_value(conf->keep_conn, prev->keep_conn, 0); + ngx_conf_merge_str_value(conf->index, prev->index, ""); # HG changeset patch # User Maxim Dounin # Date 1297818715 -10800 # Node ID cacc4bb6e4e41b6f0153c523053f4cbd4c817772 # Parent fc3632209583b20ed2fcede98c8dee0a1ca11d0b Upstream: process Transfer-Encoding header and detect chunked one. 
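Only detection happens here; consumers are expected to treat a chunked response as having no usable Content-Length. The proxy patch later in this series reduces this to the following pattern once the header block has been processed:

    /* chunked framing overrides any Content-Length header */

    if (u->headers_in.chunked) {
        u->headers_in.content_length_n = -1;
    }
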
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -91,6 +91,9 @@ static ngx_int_t ngx_http_upstream_proce ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_process_charset(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); +static ngx_int_t + ngx_http_upstream_process_transfer_encoding(ngx_http_request_t *r, + ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_copy_header_line(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t @@ -247,6 +250,10 @@ ngx_http_upstream_header_t ngx_http_ups ngx_http_upstream_process_charset, 0, ngx_http_upstream_copy_header_line, 0, 0 }, + { ngx_string("Transfer-Encoding"), + ngx_http_upstream_process_transfer_encoding, 0, + ngx_http_upstream_ignore_header_line, 0, 0 }, + #if (NGX_HTTP_GZIP) { ngx_string("Content-Encoding"), ngx_http_upstream_process_header_line, @@ -3365,6 +3372,23 @@ ngx_http_upstream_process_charset(ngx_ht static ngx_int_t +ngx_http_upstream_process_transfer_encoding(ngx_http_request_t *r, + ngx_table_elt_t *h, ngx_uint_t offset) +{ + r->upstream->headers_in.transfer_encoding = h; + + if (ngx_strlcasestrn(h->value.data, h->value.data + h->value.len, + (u_char *) "chunked", 7 - 1) + != NULL) + { + r->upstream->headers_in.chunked = 1; + } + + return NGX_OK; +} + + +static ngx_int_t ngx_http_upstream_copy_header_line(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset) { diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -217,6 +217,7 @@ typedef struct { ngx_table_elt_t *location; ngx_table_elt_t *accept_ranges; ngx_table_elt_t *www_authenticate; + ngx_table_elt_t *transfer_encoding; #if (NGX_HTTP_GZIP) ngx_table_elt_t *content_encoding; @@ -225,6 +226,8 @@ typedef struct { off_t content_length_n; ngx_array_t cache_control; + + unsigned chunked:1; } ngx_http_upstream_headers_in_t; # HG changeset patch # User Maxim Dounin # Date 1315238484 -14400 # Node ID 41e0e5f2405eba519b93059bc2d2436467726446 # Parent cacc4bb6e4e41b6f0153c523053f4cbd4c817772 Upstream: process Connection header and detect close token. 
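As with the chunked flag, detection and policy are kept apart: connection_close is merely recorded here, and protocol handlers later combine it with body-completion state when deciding whether a connection may be reused, as in the non-buffered proxy filter added later in this series:

    if (u->length == 0) {
        /* body fully read; reuse only if the peer didn't ask to close */
        u->keepalive = !u->headers_in.connection_close;
    }
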
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c --- a/src/http/ngx_http_upstream.c +++ b/src/http/ngx_http_upstream.c @@ -91,6 +91,8 @@ static ngx_int_t ngx_http_upstream_proce ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_process_charset(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); +static ngx_int_t ngx_http_upstream_process_connection(ngx_http_request_t *r, + ngx_table_elt_t *h, ngx_uint_t offset); static ngx_int_t ngx_http_upstream_process_transfer_encoding(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset); @@ -218,7 +220,7 @@ ngx_http_upstream_header_t ngx_http_ups offsetof(ngx_http_headers_out_t, accept_ranges), 1 }, { ngx_string("Connection"), - ngx_http_upstream_ignore_header_line, 0, + ngx_http_upstream_process_connection, 0, ngx_http_upstream_ignore_header_line, 0, 0 }, { ngx_string("Keep-Alive"), @@ -3372,6 +3374,23 @@ ngx_http_upstream_process_charset(ngx_ht static ngx_int_t +ngx_http_upstream_process_connection(ngx_http_request_t *r, ngx_table_elt_t *h, + ngx_uint_t offset) +{ + r->upstream->headers_in.connection = h; + + if (ngx_strlcasestrn(h->value.data, h->value.data + h->value.len, + (u_char *) "close", 5 - 1) + != NULL) + { + r->upstream->headers_in.connection_close = 1; + } + + return NGX_OK; +} + + +static ngx_int_t ngx_http_upstream_process_transfer_encoding(ngx_http_request_t *r, ngx_table_elt_t *h, ngx_uint_t offset) { diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h --- a/src/http/ngx_http_upstream.h +++ b/src/http/ngx_http_upstream.h @@ -227,6 +227,7 @@ typedef struct { ngx_array_t cache_control; + unsigned connection_close:1; unsigned chunked:1; } ngx_http_upstream_headers_in_t; # HG changeset patch # User Maxim Dounin # Date 1315238484 -14400 # Node ID 51f7098b91bce777990457dc87ae959db38ccf23 # Parent 41e0e5f2405eba519b93059bc2d2436467726446 Protocol version parsing in ngx_http_parse_status_line(). Once we know protocol version, set u->headers_in.connection_close to indicate implicitly assumed connection close with HTTP before 1.1. 
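A note on the encoding, which the patch relies on but does not spell out: ngx_http_parse_status_line() stores the version as major * 1000 + minor, so HTTP/1.0 becomes 1000 and HTTP/1.1 becomes 1001, matching the NGX_HTTP_VERSION_10 and NGX_HTTP_VERSION_11 constants. That is what makes the simple comparison used by the proxy module below valid:

    if (ctx->status.http_version < NGX_HTTP_VERSION_11) {
        /* HTTP/0.9 (9) and HTTP/1.0 (1000) imply connection close */
        u->headers_in.connection_close = 1;
    }
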
diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c +++ b/src/http/modules/ngx_http_proxy_module.c @@ -1210,6 +1210,7 @@ ngx_http_proxy_process_status_line(ngx_h r->http_version = NGX_HTTP_VERSION_9; u->state->status = NGX_HTTP_OK; + u->headers_in.connection_close = 1; return NGX_OK; } @@ -1234,6 +1235,10 @@ ngx_http_proxy_process_status_line(ngx_h "http proxy status %ui \"%V\"", u->headers_in.status_n, &u->headers_in.status_line); + if (ctx->status.http_version < NGX_HTTP_VERSION_11) { + u->headers_in.connection_close = 1; + } + u->process_header = ngx_http_proxy_process_header; return ngx_http_proxy_process_header(r); diff --git a/src/http/ngx_http.h b/src/http/ngx_http.h --- a/src/http/ngx_http.h +++ b/src/http/ngx_http.h @@ -52,6 +52,7 @@ struct ngx_http_log_ctx_s { typedef struct { + ngx_uint_t http_version; ngx_uint_t code; ngx_uint_t count; u_char *start; diff --git a/src/http/ngx_http_parse.c b/src/http/ngx_http_parse.c --- a/src/http/ngx_http_parse.c +++ b/src/http/ngx_http_parse.c @@ -1403,6 +1403,7 @@ ngx_http_parse_status_line(ngx_http_requ return NGX_ERROR; } + r->http_major = ch - '0'; state = sw_major_digit; break; @@ -1417,6 +1418,7 @@ ngx_http_parse_status_line(ngx_http_requ return NGX_ERROR; } + r->http_major = r->http_major * 10 + ch - '0'; break; /* the first digit of minor HTTP version */ @@ -1425,6 +1427,7 @@ ngx_http_parse_status_line(ngx_http_requ return NGX_ERROR; } + r->http_minor = ch - '0'; state = sw_minor_digit; break; @@ -1439,6 +1442,7 @@ ngx_http_parse_status_line(ngx_http_requ return NGX_ERROR; } + r->http_minor = r->http_minor * 10 + ch - '0'; break; /* HTTP status code */ @@ -1516,6 +1520,7 @@ done: status->end = p; } + status->http_version = r->http_major * 1000 + r->http_minor; r->state = sw_start; return NGX_OK; # HG changeset patch # User Maxim Dounin # Date 1315238484 -14400 # Node ID 80f8b493d819878f3311788fbf94058cd4cb79de # Parent 51f7098b91bce777990457dc87ae959db38ccf23 Proxy: basic HTTP/1.1 support (including keepalive). By default we still send requests using HTTP/1.0. This may be changed with new proxy_http_version directive. 
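Combined with the keepalive module at the end of this series, proxy keepalive requires opting in to HTTP/1.1 and overriding the "Connection: close" header that the proxy module adds to proxied requests by default (see ngx_http_proxy_headers below). An illustrative configuration; the addresses and cache size are placeholders.

    upstream http_backend {
        server 127.0.0.1:8080;
        keepalive 16;
    }

    server {
        listen 80;

        location / {
            proxy_pass http://http_backend;

            proxy_http_version 1.1;          # talk HTTP/1.1 to the backend
            proxy_set_header   Connection ""; # drop the default "close"
        }
    }
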
diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c --- a/src/http/modules/ngx_http_proxy_module.c +++ b/src/http/modules/ngx_http_proxy_module.c @@ -71,6 +71,8 @@ typedef struct { ngx_flag_t redirect; + ngx_uint_t http_version; + ngx_uint_t headers_hash_max_size; ngx_uint_t headers_hash_bucket_size; } ngx_http_proxy_loc_conf_t; @@ -80,6 +82,12 @@ typedef struct { ngx_http_status_t status; ngx_http_proxy_vars_t vars; size_t internal_body_length; + + ngx_uint_t state; + off_t size; + off_t length; + + ngx_uint_t head; /* unsigned head:1 */ } ngx_http_proxy_ctx_t; @@ -92,6 +100,15 @@ static ngx_int_t ngx_http_proxy_create_r static ngx_int_t ngx_http_proxy_reinit_request(ngx_http_request_t *r); static ngx_int_t ngx_http_proxy_process_status_line(ngx_http_request_t *r); static ngx_int_t ngx_http_proxy_process_header(ngx_http_request_t *r); +static ngx_int_t ngx_http_proxy_input_filter_init(void *data); +static ngx_int_t ngx_http_proxy_copy_filter(ngx_event_pipe_t *p, + ngx_buf_t *buf); +static ngx_int_t ngx_http_proxy_chunked_filter(ngx_event_pipe_t *p, + ngx_buf_t *buf); +static ngx_int_t ngx_http_proxy_non_buffered_copy_filter(void *data, + ssize_t bytes); +static ngx_int_t ngx_http_proxy_non_buffered_chunked_filter(void *data, + ssize_t bytes); static void ngx_http_proxy_abort_request(ngx_http_request_t *r); static void ngx_http_proxy_finalize_request(ngx_http_request_t *r, ngx_int_t rc); @@ -157,6 +174,13 @@ static ngx_conf_bitmask_t ngx_http_prox }; +static ngx_conf_enum_t ngx_http_proxy_http_version[] = { + { ngx_string("1.0"), NGX_HTTP_VERSION_10 }, + { ngx_string("1.1"), NGX_HTTP_VERSION_11 }, + { ngx_null_string, 0 } +}; + + ngx_module_t ngx_http_proxy_module; @@ -432,6 +456,13 @@ static ngx_command_t ngx_http_proxy_com offsetof(ngx_http_proxy_loc_conf_t, upstream.ignore_headers), &ngx_http_upstream_ignore_headers_masks }, + { ngx_string("proxy_http_version"), + NGX_HTTP_MAIN_CONF|NGX_HTTP_SRV_CONF|NGX_HTTP_LOC_CONF|NGX_CONF_TAKE1, + ngx_conf_set_enum_slot, + NGX_HTTP_LOC_CONF_OFFSET, + offsetof(ngx_http_proxy_loc_conf_t, http_version), + &ngx_http_proxy_http_version }, + #if (NGX_HTTP_SSL) { ngx_string("proxy_ssl_session_reuse"), @@ -479,6 +510,7 @@ ngx_module_t ngx_http_proxy_module = { static char ngx_http_proxy_version[] = " HTTP/1.0" CRLF; +static char ngx_http_proxy_version_11[] = " HTTP/1.1" CRLF; static ngx_keyval_t ngx_http_proxy_headers[] = { @@ -486,6 +518,7 @@ static ngx_keyval_t ngx_http_proxy_head { ngx_string("Connection"), ngx_string("close") }, { ngx_string("Keep-Alive"), ngx_string("") }, { ngx_string("Expect"), ngx_string("") }, + { ngx_string("Upgrade"), ngx_string("") }, { ngx_null_string, ngx_null_string } }; @@ -610,7 +643,12 @@ ngx_http_proxy_handler(ngx_http_request_ return NGX_HTTP_INTERNAL_SERVER_ERROR; } - u->pipe->input_filter = ngx_event_pipe_copy_input_filter; + u->pipe->input_filter = ngx_http_proxy_copy_filter; + u->pipe->input_ctx = r; + + u->input_filter_init = ngx_http_proxy_input_filter_init; + u->input_filter = ngx_http_proxy_non_buffered_copy_filter; + u->input_filter_ctx = r; u->accel = 1; @@ -866,14 +904,20 @@ ngx_http_proxy_create_request(ngx_http_r method.len++; } + ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module); + + if (method.len == 5 + && ngx_strncasecmp(method.data, (u_char *) "HEAD ", 5) == 0) + { + ctx->head = 1; + } + len = method.len + sizeof(ngx_http_proxy_version) - 1 + sizeof(CRLF) - 1; escape = 0; loc_len = 0; unparsed_uri = 0; - ctx = ngx_http_get_module_ctx(r, 
ngx_http_proxy_module); - if (plcf->proxy_lengths) { uri_len = ctx->vars.uri.len; @@ -1009,8 +1053,14 @@ ngx_http_proxy_create_request(ngx_http_r u->uri.len = b->last - u->uri.data; - b->last = ngx_cpymem(b->last, ngx_http_proxy_version, - sizeof(ngx_http_proxy_version) - 1); + if (plcf->http_version == NGX_HTTP_VERSION_11) { + b->last = ngx_cpymem(b->last, ngx_http_proxy_version_11, + sizeof(ngx_http_proxy_version_11) - 1); + + } else { + b->last = ngx_cpymem(b->last, ngx_http_proxy_version, + sizeof(ngx_http_proxy_version) - 1); + } ngx_memzero(&e, sizeof(ngx_http_script_engine_t)); @@ -1158,8 +1208,11 @@ ngx_http_proxy_reinit_request(ngx_http_r ctx->status.count = 0; ctx->status.start = NULL; ctx->status.end = NULL; + ctx->state = 0; r->upstream->process_header = ngx_http_proxy_process_status_line; + r->upstream->pipe->input_filter = ngx_http_proxy_copy_filter; + r->upstream->input_filter = ngx_http_proxy_non_buffered_copy_filter; r->state = 0; return NGX_OK; @@ -1250,6 +1303,8 @@ ngx_http_proxy_process_header(ngx_http_r { ngx_int_t rc; ngx_table_elt_t *h; + ngx_http_upstream_t *u; + ngx_http_proxy_ctx_t *ctx; ngx_http_upstream_header_t *hh; ngx_http_upstream_main_conf_t *umcf; @@ -1345,6 +1400,30 @@ ngx_http_proxy_process_header(ngx_http_r h->lowcase_key = (u_char *) "date"; } + /* clear content length if response is chunked */ + + u = r->upstream; + + if (u->headers_in.chunked) { + u->headers_in.content_length_n = -1; + } + + /* + * set u->keepalive if response has no body; this allows to keep + * connections alive in case of r->header_only or X-Accel-Redirect + */ + + ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module); + + if (u->headers_in.status_n == NGX_HTTP_NO_CONTENT + || u->headers_in.status_n == NGX_HTTP_NOT_MODIFIED + || ctx->head + || (!u->headers_in.chunked + && u->headers_in.content_length_n == 0)) + { + u->keepalive = !u->headers_in.connection_close; + } + return NGX_OK; } @@ -1362,6 +1441,690 @@ ngx_http_proxy_process_header(ngx_http_r } +static ngx_int_t +ngx_http_proxy_input_filter_init(void *data) +{ + ngx_http_request_t *r = data; + ngx_http_upstream_t *u; + ngx_http_proxy_ctx_t *ctx; + + u = r->upstream; + ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module); + + ngx_log_debug4(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http proxy filter init s:%d h:%d c:%d l:%O", + u->headers_in.status_n, ctx->head, u->headers_in.chunked, + u->headers_in.content_length_n); + + /* as per RFC2616, 4.4 Message Length */ + + if (u->headers_in.status_n == NGX_HTTP_NO_CONTENT + || u->headers_in.status_n == NGX_HTTP_NOT_MODIFIED + || ctx->head) + { + /* 1xx, 204, and 304 and replies to HEAD requests */ + /* no 1xx since we don't send Expect and Upgrade */ + + u->pipe->length = 0; + u->length = 0; + u->keepalive = !u->headers_in.connection_close; + + } else if (u->headers_in.chunked) { + /* chunked */ + + u->pipe->input_filter = ngx_http_proxy_chunked_filter; + u->pipe->length = 3; /* "0" LF LF */ + + u->input_filter = ngx_http_proxy_non_buffered_chunked_filter; + u->length = -1; + + } else if (u->headers_in.content_length_n == 0) { + /* empty body: special case as filter won't be called */ + + u->pipe->length = 0; + u->length = 0; + u->keepalive = !u->headers_in.connection_close; + + } else { + /* content length or connection close */ + + u->pipe->length = u->headers_in.content_length_n; + u->length = u->headers_in.content_length_n; + } + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_proxy_copy_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) +{ + ngx_buf_t *b; + ngx_chain_t 
*cl; + ngx_http_request_t *r; + + if (buf->pos == buf->last) { + return NGX_OK; + } + + if (p->free) { + cl = p->free; + b = cl->buf; + p->free = cl->next; + ngx_free_chain(p->pool, cl); + + } else { + b = ngx_alloc_buf(p->pool); + if (b == NULL) { + return NGX_ERROR; + } + } + + ngx_memcpy(b, buf, sizeof(ngx_buf_t)); + b->shadow = buf; + b->tag = p->tag; + b->last_shadow = 1; + b->recycled = 1; + buf->shadow = b; + + cl = ngx_alloc_chain_link(p->pool); + if (cl == NULL) { + return NGX_ERROR; + } + + cl->buf = b; + cl->next = NULL; + + ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0, "input buf #%d", b->num); + + if (p->in) { + *p->last_in = cl; + } else { + p->in = cl; + } + p->last_in = &cl->next; + + if (p->length == -1) { + return NGX_OK; + } + + p->length -= b->last - b->pos; + + if (p->length == 0) { + r = p->input_ctx; + p->upstream_done = 1; + r->upstream->keepalive = !r->upstream->headers_in.connection_close; + + } else if (p->length < 0) { + r = p->input_ctx; + p->upstream_done = 1; + + ngx_log_error(NGX_LOG_ERR, r->connection->log, 0, + "upstream sent too many data"); + } + + return NGX_OK; +} + + +static ngx_inline ngx_int_t +ngx_http_proxy_parse_chunked(ngx_http_request_t *r, ngx_buf_t *buf) +{ + u_char *pos, ch, c; + ngx_int_t rc; + ngx_http_proxy_ctx_t *ctx; + enum { + sw_chunk_start = 0, + sw_chunk_size, + sw_chunk_extension, + sw_chunk_extension_almost_done, + sw_chunk_data, + sw_after_data, + sw_after_data_almost_done, + sw_last_chunk_extension, + sw_last_chunk_extension_almost_done, + sw_trailer, + sw_trailer_almost_done, + sw_trailer_header, + sw_trailer_header_almost_done + } state; + + ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module); + state = ctx->state; + + if (state == sw_chunk_data && ctx->size == 0) { + state = sw_after_data; + } + + rc = NGX_AGAIN; + + for (pos = buf->pos; pos < buf->last; pos++) { + + ch = *pos; + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http proxy chunked byte: %02Xd s:%d", ch, state); + + switch (state) { + + case sw_chunk_start: + if (ch >= '0' && ch <= '9') { + state = sw_chunk_size; + ctx->size = ch - '0'; + break; + } + + c = (u_char) (ch | 0x20); + + if (c >= 'a' && c <= 'f') { + state = sw_chunk_size; + ctx->size = c - 'a' + 10; + break; + } + + goto invalid; + + case sw_chunk_size: + if (ch >= '0' && ch <= '9') { + ctx->size = ctx->size * 16 + (ch - '0'); + break; + } + + c = (u_char) (ch | 0x20); + + if (c >= 'a' && c <= 'f') { + ctx->size = ctx->size * 16 + (c - 'a' + 10); + break; + } + + if (ctx->size == 0) { + + switch (ch) { + case CR: + state = sw_last_chunk_extension_almost_done; + break; + case LF: + state = sw_trailer; + break; + case ';': + state = sw_last_chunk_extension; + break; + default: + goto invalid; + } + + break; + } + + switch (ch) { + case CR: + state = sw_chunk_extension_almost_done; + break; + case LF: + state = sw_chunk_data; + break; + case ';': + state = sw_chunk_extension; + break; + default: + goto invalid; + } + + break; + + case sw_chunk_extension: + switch (ch) { + case CR: + state = sw_chunk_extension_almost_done; + break; + case LF: + state = sw_chunk_data; + } + break; + + case sw_chunk_extension_almost_done: + if (ch == LF) { + state = sw_chunk_data; + break; + } + goto invalid; + + case sw_chunk_data: + rc = NGX_OK; + goto data; + + case sw_after_data: + switch (ch) { + case CR: + state = sw_after_data_almost_done; + break; + case LF: + state = sw_chunk_start; + } + break; + + case sw_after_data_almost_done: + if (ch == LF) { + state = sw_chunk_start; + break; + } + goto 
invalid; + + case sw_last_chunk_extension: + switch (ch) { + case CR: + state = sw_last_chunk_extension_almost_done; + break; + case LF: + state = sw_trailer; + } + break; + + case sw_last_chunk_extension_almost_done: + if (ch == LF) { + state = sw_trailer; + break; + } + goto invalid; + + case sw_trailer: + switch (ch) { + case CR: + state = sw_trailer_almost_done; + break; + case LF: + goto done; + default: + state = sw_trailer_header; + } + break; + + case sw_trailer_almost_done: + if (ch == LF) { + goto done; + } + goto invalid; + + case sw_trailer_header: + switch (ch) { + case CR: + state = sw_trailer_header_almost_done; + break; + case LF: + state = sw_trailer; + } + break; + + case sw_trailer_header_almost_done: + if (ch == LF) { + state = sw_trailer; + break; + } + goto invalid; + + } + } + +data: + + ctx->state = state; + buf->pos = pos; + + switch (state) { + + case sw_chunk_start: + ctx->length = 3 /* "0" LF LF */; + break; + case sw_chunk_size: + ctx->length = 2 /* LF LF */ + + (ctx->size ? ctx->size + 4 /* LF "0" LF LF */ : 0); + break; + case sw_chunk_extension: + case sw_chunk_extension_almost_done: + ctx->length = 1 /* LF */ + ctx->size + 4 /* LF "0" LF LF */; + break; + case sw_chunk_data: + ctx->length = ctx->size + 4 /* LF "0" LF LF */; + break; + case sw_after_data: + case sw_after_data_almost_done: + ctx->length = 4 /* LF "0" LF LF */; + break; + case sw_last_chunk_extension: + case sw_last_chunk_extension_almost_done: + ctx->length = 2 /* LF LF */; + break; + case sw_trailer: + case sw_trailer_almost_done: + ctx->length = 1 /* LF */; + break; + case sw_trailer_header: + case sw_trailer_header_almost_done: + ctx->length = 2 /* LF LF */; + break; + + } + + return rc; + +done: + + return NGX_DONE; + +invalid: + + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "upstream sent invalid chunked response"); + + return NGX_ERROR; +} + + +static ngx_int_t +ngx_http_proxy_chunked_filter(ngx_event_pipe_t *p, ngx_buf_t *buf) +{ + ngx_int_t rc; + ngx_buf_t *b, **prev; + ngx_chain_t *cl; + ngx_http_request_t *r; + ngx_http_proxy_ctx_t *ctx; + + if (buf->pos == buf->last) { + return NGX_OK; + } + + r = p->input_ctx; + ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module); + + b = NULL; + prev = &buf->shadow; + + for ( ;; ) { + + rc = ngx_http_proxy_parse_chunked(r, buf); + + if (rc == NGX_OK) { + + /* a chunk has been parsed successfully */ + + if (p->free) { + cl = p->free; + b = cl->buf; + p->free = cl->next; + ngx_free_chain(p->pool, cl); + + } else { + b = ngx_alloc_buf(p->pool); + if (b == NULL) { + return NGX_ERROR; + } + } + + ngx_memzero(b, sizeof(ngx_buf_t)); + + b->pos = buf->pos; + b->start = buf->start; + b->end = buf->end; + b->tag = p->tag; + b->temporary = 1; + b->recycled = 1; + + *prev = b; + prev = &b->shadow; + + cl = ngx_alloc_chain_link(p->pool); + if (cl == NULL) { + return NGX_ERROR; + } + + cl->buf = b; + cl->next = NULL; + + if (p->in) { + *p->last_in = cl; + } else { + p->in = cl; + } + p->last_in = &cl->next; + + /* STUB */ b->num = buf->num; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, p->log, 0, + "input buf #%d %p", b->num, b->pos); + + if (buf->last - buf->pos >= ctx->size) { + + buf->pos += ctx->size; + b->last = buf->pos; + ctx->size = 0; + + continue; + } + + ctx->size -= buf->last - buf->pos; + buf->pos = buf->last; + b->last = buf->last; + + continue; + } + + if (rc == NGX_DONE) { + + /* a whole response has been parsed successfully */ + + p->upstream_done = 1; + r->upstream->keepalive = !r->upstream->headers_in.connection_close; + + break; + } 
+ + if (rc == NGX_AGAIN) { + + /* set p->length, minimal amount of data we want to see */ + + p->length = ctx->length; + + break; + } + + /* invalid response */ + + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "upstream sent invalid chunked response"); + + return NGX_ERROR; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http proxy chunked state %d, length %d", + ctx->state, p->length); + + if (b) { + b->shadow = buf; + b->last_shadow = 1; + + ngx_log_debug2(NGX_LOG_DEBUG_EVENT, p->log, 0, + "input buf %p %z", b->pos, b->last - b->pos); + + return NGX_OK; + } + + /* there is no data record in the buf, add it to free chain */ + + if (ngx_event_pipe_add_free_buf(p, buf) != NGX_OK) { + return NGX_ERROR; + } + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_proxy_non_buffered_copy_filter(void *data, ssize_t bytes) +{ + ngx_http_request_t *r = data; + + ngx_buf_t *b; + ngx_chain_t *cl, **ll; + ngx_http_upstream_t *u; + + u = r->upstream; + + for (cl = u->out_bufs, ll = &u->out_bufs; cl; cl = cl->next) { + ll = &cl->next; + } + + cl = ngx_chain_get_free_buf(r->pool, &u->free_bufs); + if (cl == NULL) { + return NGX_ERROR; + } + + *ll = cl; + + cl->buf->flush = 1; + cl->buf->memory = 1; + + b = &u->buffer; + + cl->buf->pos = b->last; + b->last += bytes; + cl->buf->last = b->last; + cl->buf->tag = u->output.tag; + + if (u->length == -1) { + return NGX_OK; + } + + u->length -= bytes; + + if (u->length == 0) { + u->keepalive = !u->headers_in.connection_close; + } + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_proxy_non_buffered_chunked_filter(void *data, ssize_t bytes) +{ + ngx_http_request_t *r = data; + + ngx_int_t rc; + ngx_buf_t *b, *buf; + ngx_chain_t *cl, **ll; + ngx_http_upstream_t *u; + ngx_http_proxy_ctx_t *ctx; + + ctx = ngx_http_get_module_ctx(r, ngx_http_proxy_module); + u = r->upstream; + buf = &u->buffer; + + buf->pos = buf->last; + buf->last += bytes; + + for (cl = u->out_bufs, ll = &u->out_bufs; cl; cl = cl->next) { + ll = &cl->next; + } + + for ( ;; ) { + + rc = ngx_http_proxy_parse_chunked(r, buf); + + if (rc == NGX_OK) { + + /* a chunk has been parsed successfully */ + + cl = ngx_chain_get_free_buf(r->pool, &u->free_bufs); + if (cl == NULL) { + return NGX_ERROR; + } + + *ll = cl; + ll = &cl->next; + + b = cl->buf; + + b->flush = 1; + b->memory = 1; + + b->pos = buf->pos; + b->tag = u->output.tag; + + if (buf->last - buf->pos >= ctx->size) { + buf->pos += ctx->size; + b->last = buf->pos; + ctx->size = 0; + + } else { + ctx->size -= buf->last - buf->pos; + buf->pos = buf->last; + b->last = buf->last; + } + + ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http proxy out buf %p %z", + b->pos, b->last - b->pos); + + continue; + } + + if (rc == NGX_DONE) { + + /* a whole response has been parsed successfully */ + + u->keepalive = !u->headers_in.connection_close; + u->length = 0; + + break; + } + + if (rc == NGX_AGAIN) { + break; + } + + /* invalid response */ + + ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0, + "upstream sent invalid chunked response"); + + return NGX_ERROR; + } + + /* provide continuous buffer for subrequests in memory */ + + if (r->subrequest_in_memory) { + + cl = u->out_bufs; + + if (cl) { + buf->pos = cl->buf->pos; + } + + buf->last = buf->pos; + + for (cl = u->out_bufs; cl; cl = cl->next) { + ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, + "http proxy in memory %p-%p %uz", + cl->buf->pos, cl->buf->last, ngx_buf_size(cl->buf)); + + if (buf->last == cl->buf->pos) { + buf->last = cl->buf->last; + 
continue; + } + + buf->last = ngx_movemem(buf->last, cl->buf->pos, + cl->buf->last - cl->buf->pos); + + cl->buf->pos = buf->last - (cl->buf->last - cl->buf->pos); + cl->buf->last = buf->last; + } + } + + return NGX_OK; +} + + static void ngx_http_proxy_abort_request(ngx_http_request_t *r) { @@ -1710,6 +2473,8 @@ ngx_http_proxy_create_loc_conf(ngx_conf_ conf->redirect = NGX_CONF_UNSET; conf->upstream.change_buffering = 1; + conf->http_version = NGX_CONF_UNSET_UINT; + conf->headers_hash_max_size = NGX_CONF_UNSET_UINT; conf->headers_hash_bucket_size = NGX_CONF_UNSET_UINT; @@ -2013,6 +2778,9 @@ ngx_http_proxy_merge_loc_conf(ngx_conf_t } #endif + ngx_conf_merge_uint_value(conf->http_version, prev->http_version, + NGX_HTTP_VERSION_10); + ngx_conf_merge_uint_value(conf->headers_hash_max_size, prev->headers_hash_max_size, 512); # HG changeset patch # User Maxim Dounin # Date 1315238484 -14400 # Node ID 669380cdad2c55cd34c5a5c986c73787faf02ba1 # Parent 80f8b493d819878f3311788fbf94058cd4cb79de Upstream keepalive module. diff --git a/auto/modules b/auto/modules --- a/auto/modules +++ b/auto/modules @@ -339,6 +339,11 @@ if [ $HTTP_UPSTREAM_IP_HASH = YES ]; the HTTP_SRCS="$HTTP_SRCS $HTTP_UPSTREAM_IP_HASH_SRCS" fi +if [ $HTTP_UPSTREAM_KEEPALIVE = YES ]; then + HTTP_MODULES="$HTTP_MODULES $HTTP_UPSTREAM_KEEPALIVE_MODULE" + HTTP_SRCS="$HTTP_SRCS $HTTP_UPSTREAM_KEEPALIVE_SRCS" +fi + if [ $HTTP_STUB_STATUS = YES ]; then have=NGX_STAT_STUB . auto/have HTTP_MODULES="$HTTP_MODULES ngx_http_stub_status_module" diff --git a/auto/options b/auto/options --- a/auto/options +++ b/auto/options @@ -94,6 +94,7 @@ HTTP_DEGRADATION=NO HTTP_FLV=NO HTTP_GZIP_STATIC=NO HTTP_UPSTREAM_IP_HASH=YES +HTTP_UPSTREAM_KEEPALIVE=YES # STUB HTTP_STUB_STATUS=NO @@ -229,6 +230,7 @@ do --without-http_empty_gif_module) HTTP_EMPTY_GIF=NO ;; --without-http_browser_module) HTTP_BROWSER=NO ;; --without-http_upstream_ip_hash_module) HTTP_UPSTREAM_IP_HASH=NO ;; + --without-http_upstream_keepalive_module) HTTP_UPSTREAM_KEEPALIVE=NO ;; --with-http_perl_module) HTTP_PERL=YES ;; --with-perl_modules_path=*) NGX_PERL_MODULES="$value" ;; diff --git a/auto/sources b/auto/sources --- a/auto/sources +++ b/auto/sources @@ -471,6 +471,11 @@ HTTP_UPSTREAM_IP_HASH_MODULE=ngx_http_up HTTP_UPSTREAM_IP_HASH_SRCS=src/http/modules/ngx_http_upstream_ip_hash_module.c +HTTP_UPSTREAM_KEEPALIVE_MODULE=ngx_http_upstream_keepalive_module +HTTP_UPSTREAM_KEEPALIVE_SRCS=" \ + src/http/modules/ngx_http_upstream_keepalive_module.c" + + MAIL_INCS="src/mail" MAIL_DEPS="src/mail/ngx_mail.h" diff --git a/src/http/modules/ngx_http_upstream_keepalive_module.c b/src/http/modules/ngx_http_upstream_keepalive_module.c new file mode 100644 --- /dev/null +++ b/src/http/modules/ngx_http_upstream_keepalive_module.c @@ -0,0 +1,566 @@ + +/* + * Copyright (C) Maxim Dounin + */ + + +#include +#include +#include + + +typedef struct { + ngx_uint_t max_cached; + ngx_uint_t single; /* unsigned:1 */ + + ngx_queue_t cache; + ngx_queue_t free; + + ngx_http_upstream_init_pt original_init_upstream; + ngx_http_upstream_init_peer_pt original_init_peer; + +} ngx_http_upstream_keepalive_srv_conf_t; + + +typedef struct { + ngx_http_upstream_keepalive_srv_conf_t *conf; + + ngx_http_upstream_t *upstream; + + void *data; + + ngx_event_get_peer_pt original_get_peer; + ngx_event_free_peer_pt original_free_peer; + +#if (NGX_HTTP_SSL) + ngx_event_set_peer_session_pt original_set_session; + ngx_event_save_peer_session_pt original_save_session; +#endif + + ngx_uint_t failed; /* unsigned:1 */ + +} 
ngx_http_upstream_keepalive_peer_data_t; + + +typedef struct { + ngx_http_upstream_keepalive_srv_conf_t *conf; + + ngx_queue_t queue; + ngx_connection_t *connection; + + socklen_t socklen; + u_char sockaddr[NGX_SOCKADDRLEN]; + +} ngx_http_upstream_keepalive_cache_t; + + +static ngx_int_t ngx_http_upstream_init_keepalive_peer(ngx_http_request_t *r, + ngx_http_upstream_srv_conf_t *us); +static ngx_int_t ngx_http_upstream_get_keepalive_peer(ngx_peer_connection_t *pc, + void *data); +static void ngx_http_upstream_free_keepalive_peer(ngx_peer_connection_t *pc, + void *data, ngx_uint_t state); + +static void ngx_http_upstream_keepalive_dummy_handler(ngx_event_t *ev); +static void ngx_http_upstream_keepalive_close_handler(ngx_event_t *ev); +static void ngx_http_upstream_keepalive_close(ngx_connection_t *c); + + +#if (NGX_HTTP_SSL) +static ngx_int_t ngx_http_upstream_keepalive_set_session( + ngx_peer_connection_t *pc, void *data); +static void ngx_http_upstream_keepalive_save_session(ngx_peer_connection_t *pc, + void *data); +#endif + +static void *ngx_http_upstream_keepalive_create_conf(ngx_conf_t *cf); +static char *ngx_http_upstream_keepalive(ngx_conf_t *cf, ngx_command_t *cmd, + void *conf); + + +static ngx_command_t ngx_http_upstream_keepalive_commands[] = { + + { ngx_string("keepalive"), + NGX_HTTP_UPS_CONF|NGX_CONF_TAKE12, + ngx_http_upstream_keepalive, + 0, + 0, + NULL }, + + ngx_null_command +}; + + +static ngx_http_module_t ngx_http_upstream_keepalive_module_ctx = { + NULL, /* preconfiguration */ + NULL, /* postconfiguration */ + + NULL, /* create main configuration */ + NULL, /* init main configuration */ + + ngx_http_upstream_keepalive_create_conf, /* create server configuration */ + NULL, /* merge server configuration */ + + NULL, /* create location configuration */ + NULL /* merge location configuration */ +}; + + +ngx_module_t ngx_http_upstream_keepalive_module = { + NGX_MODULE_V1, + &ngx_http_upstream_keepalive_module_ctx, /* module context */ + ngx_http_upstream_keepalive_commands, /* module directives */ + NGX_HTTP_MODULE, /* module type */ + NULL, /* init master */ + NULL, /* init module */ + NULL, /* init process */ + NULL, /* init thread */ + NULL, /* exit thread */ + NULL, /* exit process */ + NULL, /* exit master */ + NGX_MODULE_V1_PADDING +}; + + +static ngx_int_t +ngx_http_upstream_init_keepalive(ngx_conf_t *cf, + ngx_http_upstream_srv_conf_t *us) +{ + ngx_uint_t i; + ngx_http_upstream_keepalive_srv_conf_t *kcf; + ngx_http_upstream_keepalive_cache_t *cached; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, cf->log, 0, + "init keepalive"); + + kcf = ngx_http_conf_upstream_srv_conf(us, + ngx_http_upstream_keepalive_module); + + if (kcf->original_init_upstream(cf, us) != NGX_OK) { + return NGX_ERROR; + } + + kcf->original_init_peer = us->peer.init; + + us->peer.init = ngx_http_upstream_init_keepalive_peer; + + /* allocate cache items and add to free queue */ + + cached = ngx_pcalloc(cf->pool, + sizeof(ngx_http_upstream_keepalive_cache_t) * kcf->max_cached); + if (cached == NULL) { + return NGX_ERROR; + } + + ngx_queue_init(&kcf->cache); + ngx_queue_init(&kcf->free); + + for (i = 0; i < kcf->max_cached; i++) { + ngx_queue_insert_head(&kcf->free, &cached[i].queue); + cached[i].conf = kcf; + } + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_upstream_init_keepalive_peer(ngx_http_request_t *r, + ngx_http_upstream_srv_conf_t *us) +{ + ngx_http_upstream_keepalive_peer_data_t *kp; + ngx_http_upstream_keepalive_srv_conf_t *kcf; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0, 
+ "init keepalive peer"); + + kcf = ngx_http_conf_upstream_srv_conf(us, + ngx_http_upstream_keepalive_module); + + kp = ngx_palloc(r->pool, sizeof(ngx_http_upstream_keepalive_peer_data_t)); + if (kp == NULL) { + return NGX_ERROR; + } + + if (kcf->original_init_peer(r, us) != NGX_OK) { + return NGX_ERROR; + } + + kp->conf = kcf; + kp->upstream = r->upstream; + kp->data = r->upstream->peer.data; + kp->original_get_peer = r->upstream->peer.get; + kp->original_free_peer = r->upstream->peer.free; + + r->upstream->peer.data = kp; + r->upstream->peer.get = ngx_http_upstream_get_keepalive_peer; + r->upstream->peer.free = ngx_http_upstream_free_keepalive_peer; + +#if (NGX_HTTP_SSL) + kp->original_set_session = r->upstream->peer.set_session; + kp->original_save_session = r->upstream->peer.save_session; + r->upstream->peer.set_session = ngx_http_upstream_keepalive_set_session; + r->upstream->peer.save_session = ngx_http_upstream_keepalive_save_session; +#endif + + return NGX_OK; +} + + +static ngx_int_t +ngx_http_upstream_get_keepalive_peer(ngx_peer_connection_t *pc, void *data) +{ + ngx_http_upstream_keepalive_peer_data_t *kp = data; + ngx_http_upstream_keepalive_cache_t *item; + + ngx_int_t rc; + ngx_queue_t *q, *cache; + ngx_connection_t *c; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "get keepalive peer"); + + kp->failed = 0; + + /* single pool of cached connections */ + + if (kp->conf->single && !ngx_queue_empty(&kp->conf->cache)) { + + q = ngx_queue_head(&kp->conf->cache); + + item = ngx_queue_data(q, ngx_http_upstream_keepalive_cache_t, queue); + c = item->connection; + + ngx_queue_remove(q); + ngx_queue_insert_head(&kp->conf->free, q); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "get keepalive peer: using connection %p", c); + + c->idle = 0; + c->log = pc->log; + c->read->log = pc->log; + c->write->log = pc->log; + c->pool->log = pc->log; + + pc->connection = c; + pc->cached = 1; + + return NGX_DONE; + } + + rc = kp->original_get_peer(pc, kp->data); + + if (kp->conf->single || rc != NGX_OK) { + return rc; + } + + /* search cache for suitable connection */ + + cache = &kp->conf->cache; + + for (q = ngx_queue_head(cache); + q != ngx_queue_sentinel(cache); + q = ngx_queue_next(q)) + { + item = ngx_queue_data(q, ngx_http_upstream_keepalive_cache_t, queue); + c = item->connection; + + if (ngx_memn2cmp((u_char *) &item->sockaddr, (u_char *) pc->sockaddr, + item->socklen, pc->socklen) + == 0) + { + ngx_queue_remove(q); + ngx_queue_insert_head(&kp->conf->free, q); + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "get keepalive peer: using connection %p", c); + + c->idle = 0; + c->log = pc->log; + c->read->log = pc->log; + c->write->log = pc->log; + c->pool->log = pc->log; + + pc->connection = c; + pc->cached = 1; + + return NGX_DONE; + } + } + + return NGX_OK; +} + + +static void +ngx_http_upstream_free_keepalive_peer(ngx_peer_connection_t *pc, void *data, + ngx_uint_t state) +{ + ngx_http_upstream_keepalive_peer_data_t *kp = data; + ngx_http_upstream_keepalive_cache_t *item; + + ngx_queue_t *q; + ngx_connection_t *c; + ngx_http_upstream_t *u; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "free keepalive peer"); + + /* remember failed state - peer.free() may be called more than once */ + + if (state & NGX_PEER_FAILED) { + kp->failed = 1; + } + + /* cache valid connections */ + + u = kp->upstream; + c = pc->connection; + + if (kp->failed + || c == NULL + || c->read->eof + || c->read->ready + || c->read->error + || c->read->timedout + || c->write->error + || 
c->write->timedout) + { + goto invalid; + } + + if (!u->keepalive) { + goto invalid; + } + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + goto invalid; + } + + ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0, + "free keepalive peer: saving connection %p", c); + + if (ngx_queue_empty(&kp->conf->free)) { + + q = ngx_queue_last(&kp->conf->cache); + ngx_queue_remove(q); + + item = ngx_queue_data(q, ngx_http_upstream_keepalive_cache_t, queue); + + ngx_http_upstream_keepalive_close(item->connection); + + } else { + q = ngx_queue_head(&kp->conf->free); + ngx_queue_remove(q); + + item = ngx_queue_data(q, ngx_http_upstream_keepalive_cache_t, queue); + } + + item->connection = c; + ngx_queue_insert_head(&kp->conf->cache, q); + + pc->connection = NULL; + + if (c->read->timer_set) { + ngx_del_timer(c->read); + } + if (c->write->timer_set) { + ngx_del_timer(c->write); + } + + c->write->handler = ngx_http_upstream_keepalive_dummy_handler; + c->read->handler = ngx_http_upstream_keepalive_close_handler; + + c->data = item; + c->idle = 1; + c->log = ngx_cycle->log; + c->read->log = ngx_cycle->log; + c->write->log = ngx_cycle->log; + c->pool->log = ngx_cycle->log; + + item->socklen = pc->socklen; + ngx_memcpy(&item->sockaddr, pc->sockaddr, pc->socklen); + +invalid: + + kp->original_free_peer(pc, kp->data, state); +} + + +static void +ngx_http_upstream_keepalive_dummy_handler(ngx_event_t *ev) +{ + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ev->log, 0, + "keepalive dummy handler"); +} + + +static void +ngx_http_upstream_keepalive_close_handler(ngx_event_t *ev) +{ + ngx_http_upstream_keepalive_srv_conf_t *conf; + ngx_http_upstream_keepalive_cache_t *item; + + int n; + char buf[1]; + ngx_connection_t *c; + + ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ev->log, 0, + "keepalive close handler"); + + c = ev->data; + + if (c->close) { + goto close; + } + + n = recv(c->fd, buf, 1, MSG_PEEK); + + if (n == -1 && ngx_socket_errno == NGX_EAGAIN) { + /* stale event */ + + if (ngx_handle_read_event(c->read, 0) != NGX_OK) { + goto close; + } + + return; + } + +close: + + item = c->data; + conf = item->conf; + + ngx_http_upstream_keepalive_close(c); + + ngx_queue_remove(&item->queue); + ngx_queue_insert_head(&conf->free, &item->queue); +} + + +static void +ngx_http_upstream_keepalive_close(ngx_connection_t *c) +{ + +#if (NGX_HTTP_SSL) + + if (c->ssl) { + c->ssl->no_wait_shutdown = 1; + c->ssl->no_send_shutdown = 1; + + if (ngx_ssl_shutdown(c) == NGX_AGAIN) { + c->ssl->handler = ngx_http_upstream_keepalive_close; + return; + } + } + +#endif + + ngx_destroy_pool(c->pool); + ngx_close_connection(c); +} + + +#if (NGX_HTTP_SSL) + +static ngx_int_t +ngx_http_upstream_keepalive_set_session(ngx_peer_connection_t *pc, void *data) +{ + ngx_http_upstream_keepalive_peer_data_t *kp = data; + + return kp->original_set_session(pc, kp->data); +} + + +static void +ngx_http_upstream_keepalive_save_session(ngx_peer_connection_t *pc, void *data) +{ + ngx_http_upstream_keepalive_peer_data_t *kp = data; + + kp->original_save_session(pc, kp->data); + return; +} + +#endif + + +static void * +ngx_http_upstream_keepalive_create_conf(ngx_conf_t *cf) +{ + ngx_http_upstream_keepalive_srv_conf_t *conf; + + conf = ngx_pcalloc(cf->pool, + sizeof(ngx_http_upstream_keepalive_srv_conf_t)); + if (conf == NULL) { + return NULL; + } + + /* + * set by ngx_pcalloc(): + * + * conf->original_init_upstream = NULL; + * conf->original_init_peer = NULL; + */ + + conf->max_cached = 1; + + return conf; +} + + +static char * +ngx_http_upstream_keepalive(ngx_conf_t *cf, ngx_command_t 
*cmd, void *conf) +{ + ngx_http_upstream_srv_conf_t *uscf; + ngx_http_upstream_keepalive_srv_conf_t *kcf; + + ngx_int_t n; + ngx_str_t *value; + ngx_uint_t i; + + uscf = ngx_http_conf_get_module_srv_conf(cf, ngx_http_upstream_module); + + kcf = ngx_http_conf_upstream_srv_conf(uscf, + ngx_http_upstream_keepalive_module); + + kcf->original_init_upstream = uscf->peer.init_upstream + ? uscf->peer.init_upstream + : ngx_http_upstream_init_round_robin; + + uscf->peer.init_upstream = ngx_http_upstream_init_keepalive; + + /* read options */ + + value = cf->args->elts; + + n = ngx_atoi(value[1].data, value[1].len); + + if (n == NGX_ERROR || n == 0) { + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid value \"%V\" in \"%V\" directive", + &value[1], &cmd->name); + return NGX_CONF_ERROR; + } + + kcf->max_cached = n; + + for (i = 2; i < cf->args->nelts; i++) { + + if (ngx_strcmp(value[i].data, "single") == 0) { + kcf->single = 1; + continue; + } + + goto invalid; + } + + return NGX_CONF_OK; + +invalid: + + ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, + "invalid parameter \"%V\"", &value[i]); + + return NGX_CONF_ERROR; +}
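Finally, an illustrative use of the new keepalive directive together with the memcached keepalive support added earlier in the series; the address, key scheme, and cached-connection count are placeholders. The optional "single" parameter tells the module to keep one shared pool of cached connections instead of matching them against the peer address chosen by the balancer.

    upstream memcached_backend {
        server 127.0.0.1:11211;
        keepalive 32;
    }

    server {
        listen 80;

        location /cache/ {
            set            $memcached_key $uri;
            memcached_pass memcached_backend;
            error_page     404 = @fallback;
        }

        location @fallback {
            proxy_pass http://127.0.0.1:8080;
        }
    }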