fastcgi performance at 10K

Maxim Dounin mdounin at mdounin.ru
Thu Apr 23 05:55:37 MSD 2009


Hello!

On Wed, Apr 15, 2009 at 10:23:11PM +0400, Maxim Dounin wrote:

> On Wed, Apr 15, 2009 at 01:09:30PM +0100, Valery Kholodkov wrote:
> 
> > 
> > ----- "Maxim Dounin" <mdounin at mdounin.ru> wrote:
> > 
> > > I have a working prototype of keepalive support for fastcgi.
> > > If you really want to test it, I can share the patches.
> > > 
> > > But bear in mind that on any reasonably heavy fastcgi requests
> > > this will not give a noticeable speedup, and it most likely will
> > > not cure the problems described (and may even make them worse).
> > 
> > Regardless of what anyone thinks about it, show us the code!
> 
> The patches are attached; apply them one after another:
> 
> patch-nginx-proxy-flush-2.txt
> patch-nginx-proxy-length-2.txt
> patch-nginx-keepalive.txt

Revert patch-nginx-keepalive.txt and apply the attached 
patch-nginx-keepalive-2.txt.

This fixes a problem with fastcgi connections hanging in some 
situations (the minimum number of bytes that should be handed to 
the filter was not always set correctly).
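
For illustration only, here is a minimal standalone C sketch of the idea,
mirroring the hunk added to ngx_http_fastcgi_input_filter() in the patch
below. The enum, struct and function names are hypothetical stand-ins for
the nginx internals (f->state, f->length, f->padding, p->length), with the
parser states collapsed to three:

/*
 * Sketch, not nginx source: derive how many more bytes the pipe should
 * expect before it is worth running the input filter again, based on
 * the FastCGI record parser state.
 */
#include <assert.h>
#include <stddef.h>

typedef enum {
    fcgi_st_header,    /* still inside the 8-byte record header */
    fcgi_st_data,      /* inside the record content */
    fcgi_st_padding    /* inside the record padding */
} fcgi_state_t;

typedef struct {
    fcgi_state_t  state;
    size_t        length;    /* content bytes left in the current record */
    size_t        padding;   /* padding bytes left in the current record */
} fcgi_parser_t;

/*
 * Minimum number of bytes that must arrive before the filter can make
 * progress.  When this value is not refreshed, the pipe may keep
 * waiting for data that will never come and the connection hangs.
 */
static size_t
fcgi_pipe_length(const fcgi_parser_t *f)
{
    if (f->state < fcgi_st_data) {
        return 1;               /* any byte advances header parsing */
    }

    if (f->state == fcgi_st_padding) {
        return f->padding;      /* skip the rest of the padding */
    }

    return f->length;           /* fcgi_st_data: rest of the content */
}

int
main(void)
{
    fcgi_parser_t  f = { fcgi_st_data, 4096, 8 };

    assert(fcgi_pipe_length(&f) == 4096);

    f.state = fcgi_st_padding;
    assert(fcgi_pipe_length(&f) == 8);

    f.state = fcgi_st_header;
    assert(fcgi_pipe_length(&f) == 1);

    return 0;
}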

Thanks to Denis Latypov for spotting the problem.

Maxim Dounin
-------------- next part --------------
# HG changeset patch
# User Maxim Dounin <mdounin at mdounin.ru>
# Date 1240450511 -14400
# Node ID 2612ebcd979b20c0c496eaa4035f69292c9679b7
# Parent  c3034cfee8a3c13a2c69418877cbf133ac27a103
Upstream: keepalive support in memcached and fastcgi.

Currently dirty, but works (and passes all tests).

diff --git a/src/http/modules/ngx_http_fastcgi_module.c b/src/http/modules/ngx_http_fastcgi_module.c
--- a/src/http/modules/ngx_http_fastcgi_module.c
+++ b/src/http/modules/ngx_http_fastcgi_module.c
@@ -73,6 +73,8 @@ typedef struct {
 
 #define NGX_HTTP_FASTCGI_RESPONDER      1
 
+#define NGX_HTTP_FASTCGI_KEEP_CONN      1
+
 #define NGX_HTTP_FASTCGI_BEGIN_REQUEST  1
 #define NGX_HTTP_FASTCGI_ABORT_REQUEST  2
 #define NGX_HTTP_FASTCGI_END_REQUEST    3
@@ -463,7 +465,7 @@ static ngx_http_fastcgi_request_start_t 
 
     { 0,                                               /* role_hi */
       NGX_HTTP_FASTCGI_RESPONDER,                      /* role_lo */
-      0, /* NGX_HTTP_FASTCGI_KEEP_CONN */              /* flags */
+      NGX_HTTP_FASTCGI_KEEP_CONN,                      /* flags */
       { 0, 0, 0, 0, 0 } },                             /* reserved[5] */
 
     { 1,                                               /* version */
@@ -1397,6 +1399,7 @@ ngx_http_fastcgi_process_header(ngx_http
         }
 
         if (rc == NGX_HTTP_PARSE_HEADER_DONE) {
+            u->pipe->length = sizeof(ngx_http_fastcgi_header_t);
             return NGX_OK;
         }
 
@@ -1465,7 +1468,10 @@ ngx_http_fastcgi_input_filter(ngx_event_
 
             if (f->type == NGX_HTTP_FASTCGI_STDOUT && f->length == 0) {
                 f->state = ngx_http_fastcgi_st_version;
+                /* XXX */
+#if 0
                 p->upstream_done = 1;
+#endif
 
                 ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0,
                                "http fastcgi closed stdout");
@@ -1476,6 +1482,7 @@ ngx_http_fastcgi_input_filter(ngx_event_
             if (f->type == NGX_HTTP_FASTCGI_END_REQUEST) {
                 f->state = ngx_http_fastcgi_st_version;
                 p->upstream_done = 1;
+                r->upstream->keepalive = 1;
 
                 ngx_log_debug0(NGX_LOG_DEBUG_HTTP, p->log, 0,
                                "http fastcgi sent end request");
@@ -1638,6 +1645,20 @@ ngx_http_fastcgi_input_filter(ngx_event_
 
     }
 
+    /* XXX */
+
+    if (f->state < ngx_http_fastcgi_st_data) {
+        p->length = 1;
+
+    } else if (f->state == ngx_http_fastcgi_st_padding) {
+        p->length = f->padding;
+
+    } else {
+        /* ngx_http_fastcgi_st_data */
+
+        p->length = f->length;
+    }
+
     if (b) {
         b->shadow = buf;
         b->last_shadow = 1;
diff --git a/src/http/modules/ngx_http_memcached_module.c b/src/http/modules/ngx_http_memcached_module.c
--- a/src/http/modules/ngx_http_memcached_module.c
+++ b/src/http/modules/ngx_http_memcached_module.c
@@ -385,6 +385,7 @@ found:
 
         u->headers_in.status_n = 404;
         u->state->status = 404;
+        u->keepalive = 1;
 
         return NGX_OK;
     }
@@ -435,6 +436,7 @@ ngx_http_memcached_filter(void *data, ss
         {
             ngx_log_error(NGX_LOG_ERR, ctx->request->connection->log, 0,
                           "memcached sent invalid trailer");
+            u->keepalive = 0;
         }
 
         u->length = 0;
@@ -477,6 +479,9 @@ ngx_http_memcached_filter(void *data, ss
     if (ngx_strncmp(last, ngx_http_memcached_end, b->last - last) != 0) {
         ngx_log_error(NGX_LOG_ERR, ctx->request->connection->log, 0,
                       "memcached sent invalid trailer");
+
+    } else {
+        u->keepalive = 1;
     }
 
     ctx->rest -= b->last - last;
diff --git a/src/http/modules/ngx_http_proxy_module.c b/src/http/modules/ngx_http_proxy_module.c
--- a/src/http/modules/ngx_http_proxy_module.c
+++ b/src/http/modules/ngx_http_proxy_module.c
@@ -1527,6 +1527,15 @@ ngx_http_proxy_process_header(ngx_http_r
                 h->lowcase_key = (u_char *) "date";
             }
 
+            /* XXX */
+
+            if (r->headers_out.content_length_n != -1) {
+                r->upstream->pipe->length = r->headers_out.content_length_n;
+
+            } else {
+                r->upstream->pipe->length = NGX_MAX_OFF_T_VALUE;
+            }
+
             return NGX_OK;
         }
 
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -1371,6 +1371,8 @@ ngx_http_upstream_process_header(ngx_htt
 
         rc = u->process_header(r);
 
+        /* XXX */
+
         if (rc == NGX_AGAIN) {
 
             if (u->buffer.pos == u->buffer.end) {
@@ -1726,6 +1728,8 @@ ngx_http_upstream_process_headers(ngx_ht
 
     u->headers_in.content_length_n = r->headers_out.content_length_n;
 
+    /* XXX */
+
     if (r->headers_out.content_length_n != -1) {
         u->length = (size_t) r->headers_out.content_length_n;
 
@@ -1839,6 +1843,8 @@ ngx_http_upstream_send_response(ngx_http
 
     if (!u->buffering) {
 
+        /* XXX */
+
         if (u->input_filter == NULL) {
             u->input_filter_init = ngx_http_upstream_non_buffered_filter_init;
             u->input_filter = ngx_http_upstream_non_buffered_filter;
@@ -1962,13 +1968,6 @@ ngx_http_upstream_send_response(ngx_http
     p->pool = r->pool;
     p->log = c->log;
 
-    if (r->headers_out.content_length_n != -1) {
-        p->length = r->headers_out.content_length_n;
-
-    } else {
-        p->length = NGX_MAX_OFF_T_VALUE;
-    }
-
     p->cacheable = u->cacheable || u->store;
 
     p->temp_file = ngx_pcalloc(r->pool, sizeof(ngx_temp_file_t));
diff --git a/src/http/ngx_http_upstream.h b/src/http/ngx_http_upstream.h
--- a/src/http/ngx_http_upstream.h
+++ b/src/http/ngx_http_upstream.h
@@ -293,6 +293,7 @@ struct ngx_http_upstream_s {
 #endif
 
     unsigned                         buffering:1;
+    unsigned                         keepalive:1;
 
     unsigned                         request_sent:1;
     unsigned                         header_sent:1;

