From 11bf9384ed3dd1aaca880d7a9dbfb0805aff0748 Mon Sep 17 00:00:00 2001
From: Thomas Heil
Date: Tue, 24 Jun 2014 19:39:47 +0200
Subject: [PATCH] haproxy: upgrade to version 1.5.1

Bugs in 1.5.0 can cause a file descriptor leak, which results in the
inability to accept new connections after some time.

Signed-off-by: Thomas Heil
---
 net/haproxy/Makefile                          |   6 +-
 ...onsistently-use-check-in-process_chk.patch |  31 --
 ...fetch-base-is-not-compatible-with-se.patch |  45 +++
 ...jects-OCSP-response-without-nextupda.patch |  39 ---
 ...ix-to-not-serve-expired-OCSP-respons.patch | 134 --------
 ...x-OCSP-resp-update-fails-with-the-sa.patch |  27 --
 ...n-revert-all-the-crappy-client-side-.patch | 293 ------------------
 ...roperly-initialize-and-count-log-soc.patch | 110 -------
 8 files changed, 48 insertions(+), 637 deletions(-)
 delete mode 100644 net/haproxy/patches/0001-BUG-MEDIUM-Consistently-use-check-in-process_chk.patch
 create mode 100644 net/haproxy/patches/0001-BUG-MEDIUM-http-fetch-base-is-not-compatible-with-se.patch
 delete mode 100644 net/haproxy/patches/0002-BUG-MINOR-ssl-rejects-OCSP-response-without-nextupda.patch
 delete mode 100644 net/haproxy/patches/0003-BUG-MEDIUM-ssl-Fix-to-not-serve-expired-OCSP-respons.patch
 delete mode 100644 net/haproxy/patches/0004-BUG-MINOR-ssl-Fix-OCSP-resp-update-fails-with-the-sa.patch
 delete mode 100644 net/haproxy/patches/0005-BUG-MAJOR-session-revert-all-the-crappy-client-side-.patch
 delete mode 100644 net/haproxy/patches/0006-BUG-MINOR-logs-properly-initialize-and-count-log-soc.patch

diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile
index 8e223f26e9..2f46115700 100644
--- a/net/haproxy/Makefile
+++ b/net/haproxy/Makefile
@@ -9,11 +9,11 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=haproxy
-PKG_VERSION:=1.5.0
-PKG_RELEASE:=06
+PKG_VERSION:=1.5.1
+PKG_RELEASE:=01
 
 PKG_SOURCE:=haproxy-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=http://haproxy.1wt.eu/download/1.5/src/
-PKG_MD5SUM:=e33bb97e644e98af948090f1ecebbda9
+PKG_MD5SUM:=49640cf3ddd793a05fbd3394481a1ed4
 
 PKG_MAINTAINER:=Thomas Heil
 PKG_LICENSE:=GPL-2.0
diff --git a/net/haproxy/patches/0001-BUG-MEDIUM-Consistently-use-check-in-process_chk.patch b/net/haproxy/patches/0001-BUG-MEDIUM-Consistently-use-check-in-process_chk.patch
deleted file mode 100644
index 318bd61c41..0000000000
--- a/net/haproxy/patches/0001-BUG-MEDIUM-Consistently-use-check-in-process_chk.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-From 9ac7cabaf9945fb92c96cb92f5ea85235f54f7d6 Mon Sep 17 00:00:00 2001
-From: Simon Horman
-Date: Fri, 20 Jun 2014 12:29:47 +0900
-Subject: [PATCH] BUG/MEDIUM: Consistently use 'check' in process_chk
-
-I am not entirely sure that this is a bug, but it seems
-to me that it may cause a problem if there agent-check is
-configured and there is some kind of error making a connection for it.
-
-Signed-off-by: Simon Horman
-(cherry picked from commit ccaabcdfca23851af6fd83f4f3265284d283e2ab)
----
- src/checks.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/checks.c b/src/checks.c
-index cba0018..f3b2b54 100644
---- a/src/checks.c
-+++ b/src/checks.c
-@@ -1541,7 +1541,7 @@ static struct task *process_chk(struct task *t)
- 		 * First, let's check whether there was an uncaught error,
- 		 * which can happen on connect timeout or error.
- 		 */
--		if (s->check.result == CHK_RES_UNKNOWN) {
-+		if (check->result == CHK_RES_UNKNOWN) {
- 			/* good connection is enough for pure TCP check */
- 			if ((conn->flags & CO_FL_CONNECTED) && !check->type) {
- 				if (check->use_ssl)
--- 
-1.8.5.5
-
diff --git a/net/haproxy/patches/0001-BUG-MEDIUM-http-fetch-base-is-not-compatible-with-se.patch b/net/haproxy/patches/0001-BUG-MEDIUM-http-fetch-base-is-not-compatible-with-se.patch
new file mode 100644
index 0000000000..e154d8ecbf
--- /dev/null
+++ b/net/haproxy/patches/0001-BUG-MEDIUM-http-fetch-base-is-not-compatible-with-se.patch
@@ -0,0 +1,45 @@
+From c1fbbd4a3dd480b4eebbd8b32ca6cdf08791477a Mon Sep 17 00:00:00 2001
+From: Willy Tarreau
+Date: Tue, 24 Jun 2014 17:27:02 +0200
+Subject: [PATCH] BUG/MEDIUM: http: fetch "base" is not compatible with
+ set-header
+
+The sample fetch function "base" makes use of the trash which is also
+used by set-header/add-header etc... everything which builds a formated
+line. So we end up with some junk in the header if base is in use. Let's
+fix this as all other fetches by using a trash chunk instead.
+
+This bug was reported by Baptiste Assmann, and also affects 1.5.
+(cherry picked from commit 3caf2afabe89fb0ef0886cd1d8ea99ef21ec3491)
+---
+ src/proto_http.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/src/proto_http.c b/src/proto_http.c
+index 231d49a..5321f7d 100644
+--- a/src/proto_http.c
++++ b/src/proto_http.c
+@@ -10247,6 +10247,7 @@ smp_fetch_base(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
+ 	struct http_txn *txn = l7;
+ 	char *ptr, *end, *beg;
+ 	struct hdr_ctx ctx;
++	struct chunk *temp;
+ 
+ 	CHECK_HTTP_MESSAGE_FIRST();
+ 
+@@ -10255,9 +10256,10 @@ smp_fetch_base(struct proxy *px, struct session *l4, void *l7, unsigned int opt,
+ 		return smp_fetch_path(px, l4, l7, opt, args, smp, kw);
+ 
+ 	/* OK we have the header value in ctx.line+ctx.val for ctx.vlen bytes */
+-	memcpy(trash.str, ctx.line + ctx.val, ctx.vlen);
++	temp = get_trash_chunk();
++	memcpy(temp->str, ctx.line + ctx.val, ctx.vlen);
+ 	smp->type = SMP_T_STR;
+-	smp->data.str.str = trash.str;
++	smp->data.str.str = temp->str;
+ 	smp->data.str.len = ctx.vlen;
+ 
+ 	/* now retrieve the path */
+-- 
+1.8.5.5
+
diff --git a/net/haproxy/patches/0002-BUG-MINOR-ssl-rejects-OCSP-response-without-nextupda.patch b/net/haproxy/patches/0002-BUG-MINOR-ssl-rejects-OCSP-response-without-nextupda.patch
deleted file mode 100644
index 0f16a11e2c..0000000000
--- a/net/haproxy/patches/0002-BUG-MINOR-ssl-rejects-OCSP-response-without-nextupda.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From 1135ea40b0ae5e5a98ee0cb9e13491664356adfc Mon Sep 17 00:00:00 2001
-From: Emeric Brun
-Date: Fri, 20 Jun 2014 15:44:34 +0200
-Subject: [PATCH 2/5] BUG/MINOR: ssl: rejects OCSP response without nextupdate.
-
-To cache an OCSP Response without expiration time is not safe.
-(cherry picked from commit 13a6b48e241c0a50b501446992ab4fda2529f317) ---- - src/ssl_sock.c | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index ad4b1ca..278af8b 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -139,7 +139,7 @@ static int ssl_sock_load_ocsp_response(struct chunk *ocsp_response, struct certi - OCSP_SINGLERESP *sr; - unsigned char *p = (unsigned char *)ocsp_response->str; - int rc , count_sr; -- ASN1_GENERALIZEDTIME *revtime, *thisupd, *nextupd; -+ ASN1_GENERALIZEDTIME *revtime, *thisupd, *nextupd = NULL; - int reason; - int ret = 1; - -@@ -179,6 +179,11 @@ static int ssl_sock_load_ocsp_response(struct chunk *ocsp_response, struct certi - goto out; - } - -+ if (!nextupd) { -+ memprintf(err, "OCSP single response: missing nextupdate"); -+ goto out; -+ } -+ - rc = OCSP_check_validity(thisupd, nextupd, OCSP_MAX_RESPONSE_TIME_SKEW, -1); - if (!rc) { - memprintf(err, "OCSP single response: no longer valid."); --- -1.8.5.5 - diff --git a/net/haproxy/patches/0003-BUG-MEDIUM-ssl-Fix-to-not-serve-expired-OCSP-respons.patch b/net/haproxy/patches/0003-BUG-MEDIUM-ssl-Fix-to-not-serve-expired-OCSP-respons.patch deleted file mode 100644 index f909360275..0000000000 --- a/net/haproxy/patches/0003-BUG-MEDIUM-ssl-Fix-to-not-serve-expired-OCSP-respons.patch +++ /dev/null @@ -1,134 +0,0 @@ -From 5848437fa171c593f777226306b146d02a09f70e Mon Sep 17 00:00:00 2001 -From: Emeric Brun -Date: Fri, 20 Jun 2014 15:46:13 +0200 -Subject: [PATCH 3/5] BUG/MEDIUM: ssl: Fix to not serve expired OCSP responses. - -For some browsers (firefox), an expired OCSP Response causes unwanted behavior. - -Haproxy stops serving OCSP response if nextupdate date minus -the supported time skew (#define OCSP_MAX_RESPONSE_TIME_SKEW) is -in the past. -(cherry picked from commit 4f3c87a5d942d4d0649c35805ff4e335970b87d4) ---- - src/ssl_sock.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- - 1 file changed, 87 insertions(+), 2 deletions(-) - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index 278af8b..9eacf9f 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -110,9 +110,91 @@ struct certificate_ocsp { - struct ebmb_node key; - unsigned char key_data[OCSP_MAX_CERTID_ASN1_LENGTH]; - struct chunk response; -- -+ long expire; - }; - -+/* -+ * This function returns the number of seconds elapsed -+ * since the Epoch, 1970-01-01 00:00:00 +0000 (UTC) and the -+ * date presented un ASN1_GENERALIZEDTIME. -+ * -+ * In parsing error case, it returns -1. 
-+ */ -+static long asn1_generalizedtime_to_epoch(ASN1_GENERALIZEDTIME *d) -+{ -+ long epoch; -+ char *p, *end; -+ const unsigned short month_offset[12] = { -+ 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 -+ }; -+ int year, month; -+ -+ if (!d || (d->type != V_ASN1_GENERALIZEDTIME)) return -1; -+ -+ p = (char *)d->data; -+ end = p + d->length; -+ -+ if (end - p < 4) return -1; -+ year = 1000 * (p[0] - '0') + 100 * (p[1] - '0') + 10 * (p[2] - '0') + p[3] - '0'; -+ p += 4; -+ if (end - p < 2) return -1; -+ month = 10 * (p[0] - '0') + p[1] - '0'; -+ if (month < 1 || month > 12) return -1; -+ /* Compute the number of seconds since 1 jan 1970 and the beginning of current month -+ We consider leap years and the current month ( '9') -+ goto nosec; -+ if (end - p < 2) return -1; -+ /* Add the seconds of the current minute */ -+ epoch += 10 * (p[0] - '0') + p[1] - '0'; -+ p += 2; -+ if (p == end) return -1; -+ /* Ignore seconds float part if present */ -+ if (p[0] == '.') { -+ do { -+ if (++p == end) return -1; -+ } while (p[0] >= '0' && p[0] <= '9'); -+ } -+ -+nosec: -+ if (p[0] == 'Z') { -+ if (end - p != 1) return -1; -+ return epoch; -+ } -+ else if (p[0] == '+') { -+ if (end - p != 5) return -1; -+ /* Apply timezone offset */ -+ return epoch - ((10 * (p[1] - '0') + p[2] - '0') * 60 + (10 * (p[3] - '0') + p[4] - '0')) * 60; -+ } -+ else if (p[0] == '-') { -+ if (end - p != 5) return -1; -+ /* Apply timezone offset */ -+ return epoch + ((10 * (p[1] - '0') + p[2] - '0') * 60 + (10 * (p[3] - '0') + p[4] - '0')) * 60; -+ } -+ -+ return -1; -+} -+ - static struct eb_root cert_ocsp_tree; - - /* This function starts to check if the OCSP response (in DER format) contained -@@ -229,6 +311,8 @@ static int ssl_sock_load_ocsp_response(struct chunk *ocsp_response, struct certi - goto out; - } - -+ ocsp->expire = asn1_generalizedtime_to_epoch(nextupd) - OCSP_MAX_RESPONSE_TIME_SKEW; -+ - ret = 0; - out: - if (bs) -@@ -306,7 +390,8 @@ int ssl_sock_ocsp_stapling_cbk(SSL *ssl, void *arg) - - if (!ocsp || - !ocsp->response.str || -- !ocsp->response.len) -+ !ocsp->response.len || -+ (ocsp->expire < now.tv_sec)) - return SSL_TLSEXT_ERR_NOACK; - - ssl_buf = OPENSSL_malloc(ocsp->response.len); --- -1.8.5.5 - diff --git a/net/haproxy/patches/0004-BUG-MINOR-ssl-Fix-OCSP-resp-update-fails-with-the-sa.patch b/net/haproxy/patches/0004-BUG-MINOR-ssl-Fix-OCSP-resp-update-fails-with-the-sa.patch deleted file mode 100644 index 704055c4fe..0000000000 --- a/net/haproxy/patches/0004-BUG-MINOR-ssl-Fix-OCSP-resp-update-fails-with-the-sa.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 8d914d1c9c069fada5f34b0c5e27afa4ba6b9920 Mon Sep 17 00:00:00 2001 -From: Emeric Brun -Date: Fri, 20 Jun 2014 15:37:32 +0200 -Subject: [PATCH 4/5] BUG/MINOR: ssl: Fix OCSP resp update fails with the same - certificate configured twice. (cherry picked from commit - 1d3865b096b43b9a6d6a564ffb424ffa6f1ef79f) - ---- - src/ssl_sock.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/ssl_sock.c b/src/ssl_sock.c -index 9eacf9f..328b978 100644 ---- a/src/ssl_sock.c -+++ b/src/ssl_sock.c -@@ -195,7 +195,7 @@ nosec: - return -1; - } - --static struct eb_root cert_ocsp_tree; -+static struct eb_root cert_ocsp_tree = EB_ROOT_UNIQUE; - - /* This function starts to check if the OCSP response (in DER format) contained - * in chunk 'ocsp_response' is valid (else exits on error). 
--- -1.8.5.5 - diff --git a/net/haproxy/patches/0005-BUG-MAJOR-session-revert-all-the-crappy-client-side-.patch b/net/haproxy/patches/0005-BUG-MAJOR-session-revert-all-the-crappy-client-side-.patch deleted file mode 100644 index 230ab10d46..0000000000 --- a/net/haproxy/patches/0005-BUG-MAJOR-session-revert-all-the-crappy-client-side-.patch +++ /dev/null @@ -1,293 +0,0 @@ -From 2a4f511b33958b5a09cee2913f1ed9d3210f98f5 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Mon, 23 Jun 2014 15:22:31 +0200 -Subject: [PATCH 5/5] BUG/MAJOR: session: revert all the crappy client-side - timeout changes - -This is the 3rd regression caused by the changes below. The latest to -date was reported by Finn Arne Gangstad. If a server responds with no -content-length and the client's FIN is never received, either we leak -the client-side FD or we spin at 100% CPU if timeout client-fin is set. - -Enough is enough. The amount of tricks needed to cover these side-effects -starts to look like used toilet paper stacked over a chocolate cake. I -don't want to eat that cake anymore! - -All this to avoid reporting a server-side timeout when a client stops -uploading data and haproxy expires faster than the server... A lot of -"ifs" resulting in a technically valid log that doesn't always please -users, and whose alternative causes that many issues for all others -users. - -So let's revert this crap merged since 1.5-dev25 : - Revert "CLEANUP: http: don't clear CF_READ_NOEXP twice" - This reverts commit 1592d1e72a4a2d25a554c299ae95a3e6cad80bf1. - Revert "BUG/MEDIUM: http: clear CF_READ_NOEXP when preparing a new transaction" - This reverts commit 77d29029af1c44216b190dd7442964b9d8f45257. - Revert "BUG/MEDIUM: session: don't clear CF_READ_NOEXP if analysers are not called" - This reverts commit 0943757a2144761c60e416b5ed07baa76934f5a4. - Revert "BUG/MEDIUM: http: disable server-side expiration until client has sent the body" - This reverts commit 3bed5e9337fd6eeab0f0006ebefcbe98ee5c4f9f. - Revert "BUG/MEDIUM: http: correctly report request body timeouts" - This reverts commit b9edf8fbecc9d1b5c82794735adcc367a80a4ae2. - Revert "BUG/MEDIUM: http/session: disable client-side expiration only after body" - This reverts commit b1982e27aaff2a92a389a9f1bc847e3bb8fdb4f2. - -If a cleaner AND SAFER way to do something equivalent in 1.6-dev, we *might* -consider backporting it to 1.5, but given the vicious bugs that have surfaced -since, I doubt it will happen any time soon. - -Fortunately, that crap never made it into 1.4 so no backport is needed. 
-(cherry picked from commit 6f0a7bac282c9b2082dc763977b7721b6b002089) ---- - src/proto_http.c | 95 ++------------------------------------------------------ - src/session.c | 41 ++++++++++++------------ - 2 files changed, 23 insertions(+), 113 deletions(-) - -diff --git a/src/proto_http.c b/src/proto_http.c -index 52319a9..878951f 100644 ---- a/src/proto_http.c -+++ b/src/proto_http.c -@@ -4884,7 +4884,7 @@ void http_end_txn_clean_session(struct session *s) - s->req->cons->conn_retries = 0; /* used for logging too */ - s->req->cons->exp = TICK_ETERNITY; - s->req->cons->flags &= SI_FL_DONT_WAKE; /* we're in the context of process_session */ -- s->req->flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT|CF_WAKE_CONNECT|CF_READ_NOEXP); -+ s->req->flags &= ~(CF_SHUTW|CF_SHUTW_NOW|CF_AUTO_CONNECT|CF_WRITE_ERROR|CF_STREAMER|CF_STREAMER_FAST|CF_NEVER_WAIT|CF_WAKE_CONNECT); - s->rep->flags &= ~(CF_SHUTR|CF_SHUTR_NOW|CF_READ_ATTACHED|CF_READ_ERROR|CF_READ_NOEXP|CF_STREAMER|CF_STREAMER_FAST|CF_WRITE_PARTIAL|CF_NEVER_WAIT); - s->flags &= ~(SN_DIRECT|SN_ASSIGNED|SN_ADDR_SET|SN_BE_ASSIGNED|SN_FORCE_PRST|SN_IGNORE_PRST); - s->flags &= ~(SN_CURR_SESS|SN_REDIRECTABLE|SN_SRV_REUSED); -@@ -5305,13 +5305,6 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit - */ - msg->msg_state = HTTP_MSG_ERROR; - http_resync_states(s); -- -- if (req->flags & CF_READ_TIMEOUT) -- goto cli_timeout; -- -- if (req->flags & CF_WRITE_TIMEOUT) -- goto srv_timeout; -- - return 1; - } - -@@ -5478,11 +5471,6 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit - channel_auto_read(req); - } - -- /* if we received everything, we don't want to expire anymore */ -- if (msg->msg_state == HTTP_MSG_DONE) { -- req->flags |= CF_READ_NOEXP; -- req->rex = TICK_ETERNITY; -- } - return 0; - } - } -@@ -5592,68 +5580,6 @@ int http_request_forward_body(struct session *s, struct channel *req, int an_bit - s->flags |= SN_FINST_D; - } - return 0; -- -- cli_timeout: -- if (!(s->flags & SN_ERR_MASK)) -- s->flags |= SN_ERR_CLITO; -- -- if (!(s->flags & SN_FINST_MASK)) { -- if (txn->rsp.msg_state < HTTP_MSG_ERROR) -- s->flags |= SN_FINST_H; -- else -- s->flags |= SN_FINST_D; -- } -- -- if (txn->status > 0) { -- /* Don't send any error message if something was already sent */ -- stream_int_retnclose(req->prod, NULL); -- } -- else { -- txn->status = 408; -- stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_408)); -- } -- -- msg->msg_state = HTTP_MSG_ERROR; -- req->analysers = 0; -- s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */ -- -- session_inc_http_err_ctr(s); -- s->fe->fe_counters.failed_req++; -- s->be->be_counters.failed_req++; -- if (s->listener->counters) -- s->listener->counters->failed_req++; -- return 0; -- -- srv_timeout: -- if (!(s->flags & SN_ERR_MASK)) -- s->flags |= SN_ERR_SRVTO; -- -- if (!(s->flags & SN_FINST_MASK)) { -- if (txn->rsp.msg_state < HTTP_MSG_ERROR) -- s->flags |= SN_FINST_H; -- else -- s->flags |= SN_FINST_D; -- } -- -- if (txn->status > 0) { -- /* Don't send any error message if something was already sent */ -- stream_int_retnclose(req->prod, NULL); -- } -- else { -- txn->status = 504; -- stream_int_retnclose(req->prod, http_error_message(s, HTTP_ERR_504)); -- } -- -- msg->msg_state = HTTP_MSG_ERROR; -- req->analysers = 0; -- s->rep->analysers = 0; /* we're in data phase, we want to abort both directions */ -- -- s->be->be_counters.failed_resp++; -- if 
(objt_server(s->target)) { -- objt_server(s->target)->counters.failed_resp++; -- health_adjust(objt_server(s->target), HANA_STATUS_HTTP_READ_TIMEOUT); -- } -- return 0; - } - - /* This stream analyser waits for a complete HTTP response. It returns 1 if the -@@ -5821,11 +5747,8 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit) - return 0; - } - -- /* read/write timeout : return a 504 to the client. -- * The write timeout may happen when we're uploading POST -- * data that the server is not consuming fast enough. -- */ -- else if (rep->flags & (CF_READ_TIMEOUT|CF_WRITE_TIMEOUT)) { -+ /* read timeout : return a 504 to the client. */ -+ else if (rep->flags & CF_READ_TIMEOUT) { - if (msg->err_pos >= 0) - http_capture_bad_message(&s->be->invalid_rep, s, msg, msg->msg_state, s->fe); - else if (txn->flags & TX_NOT_FIRST) -@@ -5921,12 +5844,6 @@ int http_wait_for_response(struct session *s, struct channel *rep, int an_bit) - return 0; - } - -- /* we don't want to expire on the server side first until the client -- * has sent all the expected message body. -- */ -- if (txn->req.msg_state >= HTTP_MSG_BODY && txn->req.msg_state < HTTP_MSG_DONE) -- rep->flags |= CF_READ_NOEXP; -- - channel_dont_close(rep); - rep->flags |= CF_READ_DONTWAIT; /* try to get back here ASAP */ - return 0; -@@ -6742,12 +6659,6 @@ int http_response_forward_body(struct session *s, struct channel *res, int an_bi - } - return 1; - } -- -- /* if we received everything, we don't want to expire anymore */ -- if (msg->msg_state == HTTP_MSG_DONE) { -- res->flags |= CF_READ_NOEXP; -- res->rex = TICK_ETERNITY; -- } - return 0; - } - } -diff --git a/src/session.c b/src/session.c -index f828d9c..e26f5ad 100644 ---- a/src/session.c -+++ b/src/session.c -@@ -1636,7 +1636,6 @@ struct task *process_session(struct task *t) - unsigned int rq_prod_last, rq_cons_last; - unsigned int rp_cons_last, rp_prod_last; - unsigned int req_ana_back; -- unsigned int rq_oneshot, rp_oneshot; - - //DPRINTF(stderr, "%s:%d: cs=%d ss=%d(%d) rqf=0x%08x rpf=0x%08x\n", __FUNCTION__, __LINE__, - // s->si[0].state, s->si[1].state, s->si[1].err_type, s->req->flags, s->rep->flags); -@@ -1644,13 +1643,9 @@ struct task *process_session(struct task *t) - /* this data may be no longer valid, clear it */ - memset(&s->txn.auth, 0, sizeof(s->txn.auth)); - -- /* These flags must explicitly be set every time by the analysers who -- * need them, but we won't always call them (eg: during a connection -- * retry). So we need to keep them and only clear them if we're sure -- * to call the analysers. 
-- */ -- rq_oneshot = s->req->flags & (CF_READ_NOEXP | CF_WAKE_WRITE); -- rp_oneshot = s->rep->flags & (CF_READ_NOEXP | CF_WAKE_WRITE); -+ /* This flag must explicitly be set every time */ -+ s->req->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE); -+ s->rep->flags &= ~(CF_READ_NOEXP|CF_WAKE_WRITE); - - /* Keep a copy of req/rep flags so that we can detect shutdowns */ - rqf_last = s->req->flags & ~CF_MASK_ANALYSER; -@@ -1831,8 +1826,6 @@ struct task *process_session(struct task *t) - s->si[1].state != rq_cons_last) { - unsigned int flags = s->req->flags; - -- s->req->flags &= ~rq_oneshot; -- rq_oneshot = 0; - if (s->req->prod->state >= SI_ST_EST) { - int max_loops = global.tune.maxpollevents; - unsigned int ana_list; -@@ -1986,13 +1979,11 @@ struct task *process_session(struct task *t) - /* Analyse response */ - - if (((s->rep->flags & ~rpf_last) & CF_MASK_ANALYSER) || -- ((s->rep->flags ^ rpf_last) & CF_MASK_STATIC) || -- s->si[0].state != rp_cons_last || -- s->si[1].state != rp_prod_last) { -+ (s->rep->flags ^ rpf_last) & CF_MASK_STATIC || -+ s->si[0].state != rp_cons_last || -+ s->si[1].state != rp_prod_last) { - unsigned int flags = s->rep->flags; - -- s->rep->flags &= ~rp_oneshot; -- rp_oneshot = 0; - if ((s->rep->flags & CF_MASK_ANALYSER) && - (s->rep->analysers & AN_REQ_WAIT_HTTP)) { - /* Due to HTTP pipelining, the HTTP request analyser might be waiting -@@ -2186,9 +2177,6 @@ struct task *process_session(struct task *t) - channel_auto_close(s->req); - buffer_flush(s->req->buf); - -- s->req->flags &= ~rq_oneshot; -- rq_oneshot = 0; -- - /* We'll let data flow between the producer (if still connected) - * to the consumer (which might possibly not be connected yet). - */ -@@ -2344,9 +2332,6 @@ struct task *process_session(struct task *t) - channel_auto_close(s->rep); - buffer_flush(s->rep->buf); - -- s->rep->flags &= ~rp_oneshot; -- rp_oneshot = 0; -- - /* We'll let data flow between the producer (if still connected) - * to the consumer. - */ -@@ -2496,6 +2481,20 @@ struct task *process_session(struct task *t) - s->si[0].flags &= ~(SI_FL_ERR|SI_FL_EXP); - s->si[1].flags &= ~(SI_FL_ERR|SI_FL_EXP); - -+ /* Trick: if a request is being waiting for the server to respond, -+ * and if we know the server can timeout, we don't want the timeout -+ * to expire on the client side first, but we're still interested -+ * in passing data from the client to the server (eg: POST). Thus, -+ * we can cancel the client's request timeout if the server's -+ * request timeout is set and the server has not yet sent a response. -+ */ -+ -+ if ((s->rep->flags & (CF_AUTO_CLOSE|CF_SHUTR)) == 0 && -+ (tick_isset(s->req->wex) || tick_isset(s->rep->rex))) { -+ s->req->flags |= CF_READ_NOEXP; -+ s->req->rex = TICK_ETERNITY; -+ } -+ - /* When any of the stream interfaces is attached to an applet, - * we have to call it here. Note that this one may wake the - * task up again. 
If at least one applet was called, the current --- -1.8.5.5 - diff --git a/net/haproxy/patches/0006-BUG-MINOR-logs-properly-initialize-and-count-log-soc.patch b/net/haproxy/patches/0006-BUG-MINOR-logs-properly-initialize-and-count-log-soc.patch deleted file mode 100644 index 0c2e1d4276..0000000000 --- a/net/haproxy/patches/0006-BUG-MINOR-logs-properly-initialize-and-count-log-soc.patch +++ /dev/null @@ -1,110 +0,0 @@ -From 53045692e1a106016b84b63b86fbe4822e4ec755 Mon Sep 17 00:00:00 2001 -From: Willy Tarreau -Date: Mon, 23 Jun 2014 18:07:15 +0200 -Subject: [PATCH 6/6] BUG/MINOR: logs: properly initialize and count log - sockets - -Commit 81ae195 ("[MEDIUM] add support for logging via a UNIX socket") -merged in 1.3.14 introduced a few minor issues with log sockets. All -of them happen only when a failure is encountered when trying to set -up the logging socket (eg: socket family is not available or is -temporarily short in resources). - -The first socket which experiences an error causes the socket setup -loop to abort, possibly preventing any log from being sent if it was -the first logger. The second issue is that if this socket finally -succeeds after a second attempt, errors are reported for the wrong -logger (eg: logger #1 failed instead of #2). The last point is that -we now have multiple loggers, and it's a waste of time to walk over -their list for every log while they're almost always properly set up. - -So in order to fix all this, let's merge the two lists. If a logger -experiences an error, it simply sends an alert and skips to the next -one. That way they don't prevent messages from being sent and are -all properly accounted for. -(cherry picked from commit c7c7be21bf6c7e9afd897d4bf451dc450187a77e) ---- - src/log.c | 49 +++++++++++++++++-------------------------------- - 1 file changed, 17 insertions(+), 32 deletions(-) - -diff --git a/src/log.c b/src/log.c -index eb7ccb1..114ab7b 100644 ---- a/src/log.c -+++ b/src/log.c -@@ -813,37 +813,6 @@ void __send_log(struct proxy *p, int level, char *message, size_t size) - - message[size - 1] = '\n'; - -- /* Lazily set up syslog sockets for protocol families of configured -- * syslog servers. */ -- nblogger = 0; -- list_for_each_entry(tmp, logsrvs, list) { -- const struct logsrv *logsrv = tmp; -- int proto, *plogfd; -- -- if (logsrv->addr.ss_family == AF_UNIX) { -- proto = 0; -- plogfd = &logfdunix; -- } else { -- proto = IPPROTO_UDP; -- plogfd = &logfdinet; -- } -- if (*plogfd >= 0) { -- /* socket already created. */ -- continue; -- } -- if ((*plogfd = socket(logsrv->addr.ss_family, SOCK_DGRAM, -- proto)) < 0) { -- Alert("socket for logger #%d failed: %s (errno=%d)\n", -- nblogger + 1, strerror(errno), errno); -- return; -- } -- /* we don't want to receive anything on this socket */ -- setsockopt(*plogfd, SOL_SOCKET, SO_RCVBUF, &zero, sizeof(zero)); -- /* does nothing under Linux, maybe needed for others */ -- shutdown(*plogfd, SHUT_RD); -- nblogger++; -- } -- - /* Send log messages to syslog server. */ - nblogger = 0; - list_for_each_entry(tmp, logsrvs, list) { -@@ -852,10 +821,27 @@ void __send_log(struct proxy *p, int level, char *message, size_t size) - &logfdunix : &logfdinet; - int sent; - -+ nblogger++; -+ - /* we can filter the level of the messages that are sent to each logger */ - if (level > logsrv->level) - continue; - -+ if (unlikely(*plogfd < 0)) { -+ /* socket not successfully initialized yet */ -+ int proto = logsrv->addr.ss_family == AF_UNIX ? 
0 : IPPROTO_UDP; -+ -+ if ((*plogfd = socket(logsrv->addr.ss_family, SOCK_DGRAM, proto)) < 0) { -+ Alert("socket for logger #%d failed: %s (errno=%d)\n", -+ nblogger, strerror(errno), errno); -+ continue; -+ } -+ /* we don't want to receive anything on this socket */ -+ setsockopt(*plogfd, SOL_SOCKET, SO_RCVBUF, &zero, sizeof(zero)); -+ /* does nothing under Linux, maybe needed for others */ -+ shutdown(*plogfd, SHUT_RD); -+ } -+ - /* For each target, we may have a different facility. - * We can also have a different log level for each message. - * This induces variations in the message header length. -@@ -879,7 +865,6 @@ void __send_log(struct proxy *p, int level, char *message, size_t size) - Alert("sendto logger #%d failed: %s (errno=%d)\n", - nblogger, strerror(errno), errno); - } -- nblogger++; - } - } - --- -1.8.5.5 -