| author | Maxim Dounin <mdounin@mdounin.ru> | 2016-03-28 19:49:52 +0300 |
| --- | --- | --- |
| committer | Maxim Dounin <mdounin@mdounin.ru> | 2016-03-28 19:49:52 +0300 |
| commit | 5a76856dc2014fdad479cdb4dcafcd384d0b1952 (patch) | |
| tree | 1cc01a30a46c88abcf76c1295ec16d3ac4f59064 /src | |
| parent | f5fff1eda05ef0a1370f9a83068526b3e8aebfc7 (diff) | |
| download | nginx-5a76856dc2014fdad479cdb4dcafcd384d0b1952.tar.gz, nginx-5a76856dc2014fdad479cdb4dcafcd384d0b1952.zip | |
Upstream: cached connections now tested against next_upstream.
Much like normal connections, cached connections are now tested against
u->conf->next_upstream, and u->state->status is now always set.
This makes it possible to disable additional tries even with upstream keepalive
by using "proxy_next_upstream off".
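For illustration, a minimal configuration sketch of the situation the commit message describes; the upstream name "backend" and the addresses are hypothetical, not part of the commit. With keepalive (cached) connections to the upstream, "proxy_next_upstream off" now also suppresses the retry that was previously performed when such a connection failed.

```nginx
# Hypothetical example: upstream name and addresses are illustrative only.
upstream backend {
    server 127.0.0.1:8080;
    keepalive 16;                   # cached (keepalive) connections to the upstream
}

server {
    listen 80;

    location / {
        proxy_pass http://backend;
        proxy_http_version 1.1;     # required for upstream keepalive
        proxy_set_header Connection "";

        # With this commit, a failure on a cached connection is also tested
        # against this setting, so "off" disables any additional tries.
        proxy_next_upstream off;
    }
}
```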
Diffstat (limited to 'src')
-rw-r--r-- | src/http/ngx_http_upstream.c | 91
1 file changed, 42 insertions, 49 deletions
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
index 67bd38333..3705d1453 100644
--- a/src/http/ngx_http_upstream.c
+++ b/src/http/ngx_http_upstream.c
@@ -3947,42 +3947,36 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u,
                       "upstream timed out");
     }
 
-    if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR
-        && (!u->request_sent || !r->request_body_no_buffering))
-    {
-        status = 0;
-
+    if (u->peer.cached && ft_type == NGX_HTTP_UPSTREAM_FT_ERROR) {
         /* TODO: inform balancer instead */
-
         u->peer.tries++;
+    }
 
-    } else {
-        switch (ft_type) {
+    switch (ft_type) {
 
-        case NGX_HTTP_UPSTREAM_FT_TIMEOUT:
-            status = NGX_HTTP_GATEWAY_TIME_OUT;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_TIMEOUT:
+        status = NGX_HTTP_GATEWAY_TIME_OUT;
+        break;
 
-        case NGX_HTTP_UPSTREAM_FT_HTTP_500:
-            status = NGX_HTTP_INTERNAL_SERVER_ERROR;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_HTTP_500:
+        status = NGX_HTTP_INTERNAL_SERVER_ERROR;
+        break;
 
-        case NGX_HTTP_UPSTREAM_FT_HTTP_403:
-            status = NGX_HTTP_FORBIDDEN;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_HTTP_403:
+        status = NGX_HTTP_FORBIDDEN;
+        break;
 
-        case NGX_HTTP_UPSTREAM_FT_HTTP_404:
-            status = NGX_HTTP_NOT_FOUND;
-            break;
+    case NGX_HTTP_UPSTREAM_FT_HTTP_404:
+        status = NGX_HTTP_NOT_FOUND;
+        break;
 
-        /*
-         * NGX_HTTP_UPSTREAM_FT_BUSY_LOCK and NGX_HTTP_UPSTREAM_FT_MAX_WAITING
-         * never reach here
-         */
+    /*
+     * NGX_HTTP_UPSTREAM_FT_BUSY_LOCK and NGX_HTTP_UPSTREAM_FT_MAX_WAITING
+     * never reach here
+     */
 
-        default:
-            status = NGX_HTTP_BAD_GATEWAY;
-        }
+    default:
+        status = NGX_HTTP_BAD_GATEWAY;
     }
 
     if (r->connection->error) {
@@ -3991,37 +3985,36 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u,
         return;
     }
 
-    if (status) {
-        u->state->status = status;
-        timeout = u->conf->next_upstream_timeout;
+    u->state->status = status;
 
-        if (u->peer.tries == 0
-            || !(u->conf->next_upstream & ft_type)
-            || (u->request_sent && r->request_body_no_buffering)
-            || (timeout && ngx_current_msec - u->peer.start_time >= timeout))
-        {
-#if (NGX_HTTP_CACHE)
+    timeout = u->conf->next_upstream_timeout;
 
-            if (u->cache_status == NGX_HTTP_CACHE_EXPIRED
-                && (u->conf->cache_use_stale & ft_type))
-            {
-                ngx_int_t  rc;
+    if (u->peer.tries == 0
+        || !(u->conf->next_upstream & ft_type)
+        || (u->request_sent && r->request_body_no_buffering)
+        || (timeout && ngx_current_msec - u->peer.start_time >= timeout))
+    {
+#if (NGX_HTTP_CACHE)
 
-                rc = u->reinit_request(r);
+        if (u->cache_status == NGX_HTTP_CACHE_EXPIRED
+            && (u->conf->cache_use_stale & ft_type))
+        {
+            ngx_int_t  rc;
 
-                if (rc == NGX_OK) {
-                    u->cache_status = NGX_HTTP_CACHE_STALE;
-                    rc = ngx_http_upstream_cache_send(r, u);
-                }
+            rc = u->reinit_request(r);
 
-                ngx_http_upstream_finalize_request(r, u, rc);
-                return;
+            if (rc == NGX_OK) {
+                u->cache_status = NGX_HTTP_CACHE_STALE;
+                rc = ngx_http_upstream_cache_send(r, u);
             }
-#endif
 
-            ngx_http_upstream_finalize_request(r, u, status);
+            ngx_http_upstream_finalize_request(r, u, rc);
             return;
         }
+#endif
+
+        ngx_http_upstream_finalize_request(r, u, status);
+        return;
     }
 
     if (u->peer.connection) {