haproxy: Update all patches for HAProxy v2.0.3

- Add new patches (see https://www.haproxy.org/bugs/bugs-2.0.3.html)
- Raise PKG_RELEASE to 2

Signed-off-by: Christian Lachner <gladiac@gmail.com>
Christian Lachner 2019-08-05 09:24:49 +02:00
parent 568b1b3a2c
commit fe2ed1c398
24 changed files with 2021 additions and 1 deletion


@ -11,7 +11,7 @@ include $(TOPDIR)/rules.mk
PKG_NAME:=haproxy
PKG_VERSION:=2.0.3
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.haproxy.org/download/2.0/src


@ -0,0 +1,302 @@
commit 937604b4cfccddd607b8d4883815c4e3f9ab70d0
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Jul 24 16:45:02 2019 +0200
BUG/MEDIUM: protocols: add a global lock for the init/deinit stuff
Dragan Dosen found that the listeners lock is not sufficient to protect
the listeners list when proxies are stopping because the listeners are
also unlinked from the protocol list, and under certain situations like
bombing with soft-stop signals or shutting down many frontends in parallel
from multiple CLI connections, it could be possible to provoke multiple
instances of delete_listener() to be called in parallel for different
listeners, thus corrupting the protocol lists.
Such operations are pretty rare, they are performed once per proxy upon
startup and once per proxy on shut down. Thus there is no point trying
to optimize anything and we can use a global lock to protect the protocol
lists during these manipulations.
This fix (or a variant) will have to be backported as far as 1.8.
(cherry picked from commit daacf3664506d56a1f3b050ccba504886a18b12a)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/proto/protocol.h b/include/proto/protocol.h
index 7bbebb8e..f25f77f0 100644
--- a/include/proto/protocol.h
+++ b/include/proto/protocol.h
@@ -23,9 +23,11 @@
#define _PROTO_PROTOCOL_H
#include <sys/socket.h>
+#include <common/hathreads.h>
#include <types/protocol.h>
extern struct protocol *__protocol_by_family[AF_CUST_MAX];
+__decl_hathreads(extern HA_SPINLOCK_T proto_lock);
/* Registers the protocol <proto> */
void protocol_register(struct protocol *proto);
diff --git a/include/types/protocol.h b/include/types/protocol.h
index 1d3404b9..f38baeb9 100644
--- a/include/types/protocol.h
+++ b/include/types/protocol.h
@@ -80,9 +80,9 @@ struct protocol {
int (*pause)(struct listener *l); /* temporarily pause this listener for a soft restart */
void (*add)(struct listener *l, int port); /* add a listener for this protocol and port */
- struct list listeners; /* list of listeners using this protocol */
- int nb_listeners; /* number of listeners */
- struct list list; /* list of registered protocols */
+ struct list listeners; /* list of listeners using this protocol (under proto_lock) */
+ int nb_listeners; /* number of listeners (under proto_lock) */
+ struct list list; /* list of registered protocols (under proto_lock) */
};
#define CONNECT_HAS_DATA 0x00000001 /* There's data available to be sent */
diff --git a/src/listener.c b/src/listener.c
index 40a774ed..b5fe2ac2 100644
--- a/src/listener.c
+++ b/src/listener.c
@@ -433,6 +433,9 @@ static void limit_listener(struct listener *l, struct list *list)
* used as a protocol's generic enable_all() primitive, for use after the
* fork(). It puts the listeners into LI_READY or LI_FULL states depending on
* their number of connections. It always returns ERR_NONE.
+ *
+ * Must be called with proto_lock held.
+ *
*/
int enable_all_listeners(struct protocol *proto)
{
@@ -447,6 +450,9 @@ int enable_all_listeners(struct protocol *proto)
* the polling lists when they are in the LI_READY or LI_FULL states. It is
* intended to be used as a protocol's generic disable_all() primitive. It puts
* the listeners into LI_LISTEN, and always returns ERR_NONE.
+ *
+ * Must be called with proto_lock held.
+ *
*/
int disable_all_listeners(struct protocol *proto)
{
@@ -516,6 +522,9 @@ void unbind_listener_no_close(struct listener *listener)
/* This function closes all listening sockets bound to the protocol <proto>,
* and the listeners end in LI_ASSIGNED state if they were higher. It does not
* detach them from the protocol. It always returns ERR_NONE.
+ *
+ * Must be called with proto_lock held.
+ *
*/
int unbind_all_listeners(struct protocol *proto)
{
@@ -580,14 +589,19 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
* number of listeners is updated, as well as the global number of listeners
* and jobs. Note that the listener must have previously been unbound. This
* is the generic function to use to remove a listener.
+ *
+ * Will grab the proto_lock.
+ *
*/
void delete_listener(struct listener *listener)
{
HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
if (listener->state == LI_ASSIGNED) {
listener->state = LI_INIT;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
LIST_DEL(&listener->proto_list);
listener->proto->nb_listeners--;
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
_HA_ATOMIC_SUB(&jobs, 1);
_HA_ATOMIC_SUB(&listeners, 1);
}
diff --git a/src/proto_sockpair.c b/src/proto_sockpair.c
index a4faa370..e7dd670d 100644
--- a/src/proto_sockpair.c
+++ b/src/proto_sockpair.c
@@ -80,6 +80,9 @@ INITCALL1(STG_REGISTER, protocol_register, &proto_sockpair);
/* Add <listener> to the list of sockpair listeners (port is ignored). The
* listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
* The number of listeners for the protocol is updated.
+ *
+ * Must be called with proto_lock held.
+ *
*/
static void sockpair_add_listener(struct listener *listener, int port)
{
@@ -97,6 +100,8 @@ static void sockpair_add_listener(struct listener *listener, int port)
* loose them across the fork(). A call to uxst_enable_listeners() is needed
* to complete initialization.
*
+ * Must be called with proto_lock held.
+ *
* The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL.
*/
static int sockpair_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
diff --git a/src/proto_tcp.c b/src/proto_tcp.c
index 64ffb83c..bcbe27a7 100644
--- a/src/proto_tcp.c
+++ b/src/proto_tcp.c
@@ -1103,6 +1103,9 @@ int tcp_bind_listener(struct listener *listener, char *errmsg, int errlen)
* The sockets will be registered but not added to any fd_set, in order not to
* loose them across the fork(). A call to enable_all_listeners() is needed
* to complete initialization. The return value is composed from ERR_*.
+ *
+ * Must be called with proto_lock held.
+ *
*/
static int tcp_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
{
@@ -1121,6 +1124,9 @@ static int tcp_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
/* Add <listener> to the list of tcpv4 listeners, on port <port>. The
* listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
* The number of listeners for the protocol is updated.
+ *
+ * Must be called with proto_lock held.
+ *
*/
static void tcpv4_add_listener(struct listener *listener, int port)
{
@@ -1136,6 +1142,9 @@ static void tcpv4_add_listener(struct listener *listener, int port)
/* Add <listener> to the list of tcpv6 listeners, on port <port>. The
* listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
* The number of listeners for the protocol is updated.
+ *
+ * Must be called with proto_lock held.
+ *
*/
static void tcpv6_add_listener(struct listener *listener, int port)
{
diff --git a/src/proto_uxst.c b/src/proto_uxst.c
index 66093af6..7263240f 100644
--- a/src/proto_uxst.c
+++ b/src/proto_uxst.c
@@ -379,6 +379,9 @@ static int uxst_unbind_listener(struct listener *listener)
/* Add <listener> to the list of unix stream listeners (port is ignored). The
* listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
* The number of listeners for the protocol is updated.
+ *
+ * Must be called with proto_lock held.
+ *
*/
static void uxst_add_listener(struct listener *listener, int port)
{
@@ -594,6 +597,8 @@ static int uxst_connect_server(struct connection *conn, int flags)
* loose them across the fork(). A call to uxst_enable_listeners() is needed
* to complete initialization.
*
+ * Must be called with proto_lock held.
+ *
* The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL.
*/
static int uxst_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
@@ -613,6 +618,9 @@ static int uxst_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
/* This function stops all listening UNIX sockets bound to the protocol
* <proto>. It does not detaches them from the protocol.
* It always returns ERR_NONE.
+ *
+ * Must be called with proto_lock held.
+ *
*/
static int uxst_unbind_listeners(struct protocol *proto)
{
diff --git a/src/protocol.c b/src/protocol.c
index 96e01c82..ac45cf2e 100644
--- a/src/protocol.c
+++ b/src/protocol.c
@@ -18,18 +18,26 @@
#include <common/mini-clist.h>
#include <common/standard.h>
-#include <types/protocol.h>
+#include <proto/protocol.h>
/* List head of all registered protocols */
static struct list protocols = LIST_HEAD_INIT(protocols);
struct protocol *__protocol_by_family[AF_CUST_MAX] = { };
+/* This is the global spinlock we may need to register/unregister listeners or
+ * protocols. Its main purpose is in fact to serialize the rare stop/deinit()
+ * phases.
+ */
+__decl_spinlock(proto_lock);
+
/* Registers the protocol <proto> */
void protocol_register(struct protocol *proto)
{
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
LIST_ADDQ(&protocols, &proto->list);
if (proto->sock_domain >= 0 && proto->sock_domain < AF_CUST_MAX)
__protocol_by_family[proto->sock_domain] = proto;
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
}
/* Unregisters the protocol <proto>. Note that all listeners must have
@@ -37,8 +45,10 @@ void protocol_register(struct protocol *proto)
*/
void protocol_unregister(struct protocol *proto)
{
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
LIST_DEL(&proto->list);
LIST_INIT(&proto->list);
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
}
/* binds all listeners of all registered protocols. Returns a composition
@@ -50,6 +60,7 @@ int protocol_bind_all(char *errmsg, int errlen)
int err;
err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
list_for_each_entry(proto, &protocols, list) {
if (proto->bind_all) {
err |= proto->bind_all(proto, errmsg, errlen);
@@ -57,6 +68,7 @@ int protocol_bind_all(char *errmsg, int errlen)
break;
}
}
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
return err;
}
@@ -71,11 +83,13 @@ int protocol_unbind_all(void)
int err;
err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
list_for_each_entry(proto, &protocols, list) {
if (proto->unbind_all) {
err |= proto->unbind_all(proto);
}
}
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
return err;
}
@@ -89,11 +103,13 @@ int protocol_enable_all(void)
int err;
err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
list_for_each_entry(proto, &protocols, list) {
if (proto->enable_all) {
err |= proto->enable_all(proto);
}
}
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
return err;
}
@@ -107,11 +123,13 @@ int protocol_disable_all(void)
int err;
err = 0;
+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
list_for_each_entry(proto, &protocols, list) {
if (proto->disable_all) {
err |= proto->disable_all(proto);
}
}
+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
return err;
}
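The idea behind the fix is easy to reproduce outside HAProxy: the list manipulations are rare (startup and shutdown only), so a single global lock around them costs nothing and removes the race. Below is a minimal standalone sketch of that pattern, using plain POSIX threads rather than HAProxy's hathreads macros; the list layout and names are simplified for illustration and are not HAProxy code.

#include <pthread.h>
#include <stdio.h>

struct protocol {
    const char *name;
    struct protocol *next;                 /* simple singly-linked list */
};

static struct protocol *protocols;                      /* shared list head */
static pthread_mutex_t proto_lock = PTHREAD_MUTEX_INITIALIZER;

static void protocol_register(struct protocol *proto)
{
    pthread_mutex_lock(&proto_lock);       /* serialize all list edits */
    proto->next = protocols;
    protocols = proto;
    pthread_mutex_unlock(&proto_lock);
}

static void protocol_unregister(struct protocol *proto)
{
    pthread_mutex_lock(&proto_lock);
    for (struct protocol **p = &protocols; *p; p = &(*p)->next) {
        if (*p == proto) {
            *p = proto->next;
            break;
        }
    }
    pthread_mutex_unlock(&proto_lock);
}

int main(void)
{
    struct protocol tcp = { "tcpv4", NULL };
    struct protocol uxst = { "unix_stream", NULL };

    protocol_register(&tcp);
    protocol_register(&uxst);
    protocol_unregister(&tcp);             /* safe even if another thread races here */
    for (struct protocol *p = protocols; p; p = p->next)
        printf("registered: %s\n", p->name);
    return 0;
}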


@ -0,0 +1,64 @@
commit 6d79cedaaa4a16b2f42d2bf2bc25772a51354e91
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Jul 24 17:42:44 2019 +0200
BUG/MINOR: proxy: always lock stop_proxy()
There is one unprotected call to stop_proxy() from the manage_proxy()
task, so there is a single caller by definition, but there is also
another such call from the CLI's "shutdown frontend" parser. This
one does it under the proxy's lock but the first one doesn't use it.
Thus it is theoretically possible to corrupt the list of listeners in a
proxy by issuing "shutdown frontend" and SIGUSR1 exactly at the same
time. While it sounds particularly contrived or stupid, it could
possibly happen with automated tools that would send actions via
various channels. This could cause the process to loop forever or
to crash and thus stop faster than expected.
This might be backported as far as 1.8.
(cherry picked from commit 3de3cd4d9761324b31d23eb2c4a9434ed33801b8)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/proxy.c b/src/proxy.c
index f669ebf1..ae761ead 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -1258,13 +1258,16 @@ void zombify_proxy(struct proxy *p)
* to be called when going down in order to release the ports so that another
* process may bind to them. It must also be called on disabled proxies at the
* end of start-up. If all listeners are closed, the proxy is set to the
- * PR_STSTOPPED state.
+ * PR_STSTOPPED state. The function takes the proxy's lock so it's safe to
+ * call from multiple places.
*/
void stop_proxy(struct proxy *p)
{
struct listener *l;
int nostop = 0;
+ HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
+
list_for_each_entry(l, &p->conf.listeners, by_fe) {
if (l->options & LI_O_NOSTOP) {
HA_ATOMIC_ADD(&unstoppable_jobs, 1);
@@ -1278,6 +1281,8 @@ void stop_proxy(struct proxy *p)
}
if (!nostop)
p->state = PR_STSTOPPED;
+
+ HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
}
/* This function resumes listening on the specified proxy. It scans all of its
@@ -2110,10 +2115,7 @@ static int cli_parse_shutdown_frontend(char **args, char *payload, struct appctx
send_log(px, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
px->id, px->fe_counters.cum_conn, px->be_counters.cum_conn);
- HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
stop_proxy(px);
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
-
return 1;
}


@ -0,0 +1,33 @@
commit a4ca26661f95a60974fb13a78b1a0c89f9c09ea9
Author: Willy Tarreau <w@1wt.eu>
Date: Thu Jul 25 07:53:56 2019 +0200
BUILD: threads: add the definition of PROTO_LOCK
This one was added by commit daacf3664 ("BUG/MEDIUM: protocols: add a
global lock for the init/deinit stuff") but I forgot to add it to the
include file, breaking DEBUG_THREAD.
(cherry picked from commit d6e0c03384cab2c72fb6ab841420045108ea4e6f)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/common/hathreads.h b/include/common/hathreads.h
index a7c8dc93..b05215bd 100644
--- a/include/common/hathreads.h
+++ b/include/common/hathreads.h
@@ -562,6 +562,7 @@ enum lock_label {
AUTH_LOCK,
LOGSRV_LOCK,
DICT_LOCK,
+ PROTO_LOCK,
OTHER_LOCK,
LOCK_LABELS
};
@@ -679,6 +680,7 @@ static inline const char *lock_label(enum lock_label label)
case AUTH_LOCK: return "AUTH";
case LOGSRV_LOCK: return "LOGSRV";
case DICT_LOCK: return "DICT";
+ case PROTO_LOCK: return "PROTO";
case OTHER_LOCK: return "OTHER";
case LOCK_LABELS: break; /* keep compiler happy */
};


@ -0,0 +1,32 @@
commit 974c6916ba2f7efc83193bb8c04e95294ca21112
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Jul 26 13:52:13 2019 +0200
BUG/MEDIUM: lb-chash: Fix the realloc() when the number of nodes is increased
When the number of nodes is increased because the server weight is changed, the
nodes array must be reallocated. But its new size is not correctly set: only the
total number of nodes is used, whereas the size must also depend on the size of
a node. The new size must be the total number of nodes times the size of a node.
This issue was reported on Github (#189).
This patch must be backported to all versions since the 1.6.
(cherry picked from commit 366ad86af72c455cc958943913cb2de20eefee71)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/lb_chash.c b/src/lb_chash.c
index a35351e9..0bf4e81a 100644
--- a/src/lb_chash.c
+++ b/src/lb_chash.c
@@ -84,7 +84,7 @@ static inline void chash_queue_dequeue_srv(struct server *s)
* increased the weight beyond the original weight
*/
if (s->lb_nodes_tot < s->next_eweight) {
- struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight);
+ struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
if (new_nodes) {
unsigned int j;
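The mistake fixed here is a classic realloc() sizing error: the requested size must be a byte count, not an element count. A small standalone example of the corrected pattern (not HAProxy code; the struct fields are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct tree_occ { int node; int key; };

int main(void)
{
    unsigned int old_n = 4, new_n = 16;
    struct tree_occ *nodes = calloc(old_n, sizeof(*nodes));
    if (!nodes)
        return 1;

    /* Buggy version: realloc(nodes, new_n) asks for 16 *bytes*, far fewer
     * than 16 elements, so writing nodes[15] would overflow the block. */
    struct tree_occ *grown = realloc(nodes, new_n * sizeof(*grown));
    if (!grown) {
        free(nodes);
        return 1;
    }
    grown[new_n - 1].node = 42;            /* safe only with the correct size */
    printf("last node = %d\n", grown[new_n - 1].node);
    free(grown);
    return 0;
}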


@ -0,0 +1,32 @@
commit 21a796cb83c29ee276feb04649a1b18214bbdee0
Author: Olivier Houchard <ohouchard@haproxy.com>
Date: Fri Jul 26 14:54:34 2019 +0200
BUG/MEDIUM: streams: Don't switch the SI to SI_ST_DIS if we have data to send.
In sess_established(), don't immediately switch the backend stream_interface
to SI_ST_DIS if we only got a SHUTR. We may still have something to send,
e.g. if the request is a POST, and we should be switched to SI_ST_DIS later
when the shutw happens.
This should be backported to 2.0 and 1.9.
(cherry picked from commit 7859526fd6ce7ea33e20b7e532b21aa2465cb11d)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/stream.c b/src/stream.c
index a5c5f45c..64875c80 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -954,8 +954,9 @@ static void sess_establish(struct stream *s)
si_chk_rcv(si);
}
req->wex = TICK_ETERNITY;
- /* If we managed to get the whole response, switch to SI_ST_DIS now. */
- if (rep->flags & CF_SHUTR)
+ /* If we managed to get the whole response, and we don't have anything
+ * left to send, or can't, switch to SI_ST_DIS now. */
+ if (rep->flags & (CF_SHUTR | CF_SHUTW))
si->state = SI_ST_DIS;
}


@ -0,0 +1,42 @@
commit 487b38e86c08431bc5f48aac72c8d753ee23cb03
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jul 26 15:10:39 2019 +0200
BUG/MINOR: log: make sure writev() is not interrupted on a file output
Since 1.9 we support sending logs to various non-blocking outputs like
stdout/stderr or files, by using writev() which guarantees that it only
returns after having written everything or nothing. However the syscall
may be interrupted while doing so, and this is visible when writing to
a tty during debug sessions, as some logs occasionally appear interleaved
if an xterm or SSH connection is not very fast. Performance here is not a
critical concern, log correctness is. Let's simply take the logger's lock
around the writev() call to prevent multiple senders from stepping onto
each other's toes.
This may be backported to 2.0 and 1.9.
(cherry picked from commit 9fbcb7e2e9c32659ab11927394fec2e160be2d0b)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/log.c b/src/log.c
index ef999d13..99f185e4 100644
--- a/src/log.c
+++ b/src/log.c
@@ -1672,8 +1672,15 @@ send:
iovec[7].iov_len = 1;
if (logsrv->addr.ss_family == AF_UNSPEC) {
- /* the target is a direct file descriptor */
+ /* the target is a direct file descriptor. While writev() guarantees
+ * to write everything, it doesn't guarantee that it will not be
+ * interrupted while doing so. This occasionally results in interleaved
+ * messages when the output is a tty, hence the lock. There's no real
+ * performance concern here for such type of output.
+ */
+ HA_SPIN_LOCK(LOGSRV_LOCK, &logsrv->lock);
sent = writev(*plogfd, iovec, 8);
+ HA_SPIN_UNLOCK(LOGSRV_LOCK, &logsrv->lock);
}
else {
msghdr.msg_name = (struct sockaddr *)&logsrv->addr;
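The fix relies on the fact that writev() may be interrupted or split, so serializing the call itself is what keeps each log line atomic with respect to other senders. A standalone sketch of that pattern with POSIX threads (names and the fixed iovec layout are illustrative only, not HAProxy's logsrv code):

#include <pthread.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void log_line(int fd, const char *tag, const char *msg)
{
    struct iovec iov[3] = {
        { (void *)tag, strlen(tag) },
        { (void *)msg, strlen(msg) },
        { "\n", 1 },
    };

    pthread_mutex_lock(&log_lock);         /* one sender at a time */
    (void)writev(fd, iov, 3);
    pthread_mutex_unlock(&log_lock);
}

static void *worker(void *arg)
{
    for (int i = 0; i < 100; i++)
        log_line(STDERR_FILENO, (const char *)arg, "hello from a worker");
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, worker, "[A] ");
    pthread_create(&t2, NULL, worker, "[B] ");
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}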


@ -0,0 +1,101 @@
commit 8de6badd32fb584d60733a6236113edba00f8701
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Jul 26 15:21:54 2019 +0200
DOC: improve the wording in CONTRIBUTING about how to document a bug fix
Insufficiently described bug fixes are still too frequent. It's a real
pain to create each new maintenance release, as 3/4 of the time is spent
trying to guess what problem a patch fixes, which is already important
in order to decide whether to pick the fix or not, but is even more
critical in order to write understandable release notes.
Christopher rightfully demands that a patch tagged "BUG" MUST ABSOLUTELY
describe the problem and why this problem is a bug. Describing the fix
is one thing but if the bug is unknown, why would there be a fix? How
can a stable maintainer be convinced to take a fix if its author didn't
care about checking whether it was a real bug? This patch tries to
explain a bit better what really needs to appear in the commit message
and how to describe a bug.
To be backported to all relevant stable versions.
(cherry picked from commit 41f638c1eb8167bb473a6c8811d7fd70d7c06e07)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/CONTRIBUTING b/CONTRIBUTING
index 0fcd921e..201e122d 100644
--- a/CONTRIBUTING
+++ b/CONTRIBUTING
@@ -454,7 +454,18 @@ do not think about them anymore after a few patches.
11) Real commit messages please!
- Please properly format your commit messages. To get an idea, just run
+ The commit message is how you're trying to convince a maintainer to adopt
+ your work and maintain it as long as possible. A dirty commit message almost
+ always comes with dirty code. Too short a commit message indicates that too
+ short an analysis was done and that side effects are extremely likely to be
+ encountered. It's the maintainer's job to decide to accept this work in its
+ current form or not, with the known constraints. Some patches which rework
+ architectural parts or fix sensitive bugs come with 20-30 lines of design
+ explanations, limitations, hypothesis or even doubts, and despite this it
+ happens when reading them 6 months later while trying to identify a bug that
+ developers still miss some information about corner cases.
+
+ So please properly format your commit messages. To get an idea, just run
"git log" on the file you've just modified. Patches always have the format
of an e-mail made of a subject, a description and the actual patch. If you
are sending a patch as an e-mail formatted this way, it can quickly be
@@ -506,9 +517,17 @@ do not think about them anymore after a few patches.
But in any case, it is important that there is a clean description of what
the patch does, the motivation for what it does, why it's the best way to do
- it, its impacts, and what it does not yet cover. Also, in HAProxy, like many
- projects which take a great care of maintaining stable branches, patches are
- reviewed later so that some of them can be backported to stable releases.
+ it, its impacts, and what it does not yet cover. And this is particularly
+ important for bugs. A patch tagged "BUG" must absolutely explain what the
+ problem is, why it is considered as a bug. Anybody, even non-developers,
+ should be able to tell whether or not a patch is likely to address an issue
+ they are facing. Indicating what the code will do after the fix doesn't help
+ if it does not say what problem is encountered without the patch. Note that
+ in some cases the bug is purely theorical and observed by reading the code.
+ In this case it's perfectly fine to provide an estimate about possible
+ effects. Also, in HAProxy, like many projects which take a great care of
+ maintaining stable branches, patches are reviewed later so that some of them
+ can be backported to stable releases.
While reviewing hundreds of patches can seem cumbersome, with a proper
formatting of the subject line it actually becomes very easy. For example,
@@ -630,13 +649,23 @@ patch types include :
- BUG fix for a bug. The severity of the bug should also be indicated
when known. Similarly, if a backport is needed to older versions,
- it should be indicated on the last line of the commit message. If
- the bug has been identified as a regression brought by a specific
- patch or version, this indication will be appreciated too. New
- maintenance releases are generally emitted when a few of these
- patches are merged. If the bug is a vulnerability for which a CVE
- identifier was assigned before you publish the fix, you can mention
- it in the commit message, it will help distro maintainers.
+ it should be indicated on the last line of the commit message. The
+ commit message MUST ABSOLUTELY describe the problem and its impact
+ to non-developers. Any user must be able to guess if this patch is
+ likely to fix a problem they are facing. Even if the bug was
+ discovered by accident while reading the code or running an
+ automated tool, it is mandatory to try to estimate what potential
+ issue it might cause and under what circumstances. There may even
+ be security implications sometimes so a minimum analysis is really
+ required. Also please think about stable maintainers who have to
+ build the release notes, they need to have enough input about the
+ bug's impact to explain it. If the bug has been identified as a
+ regression brought by a specific patch or version, this indication
+ will be appreciated too. New maintenance releases are generally
+ emitted when a few of these patches are merged. If the bug is a
+ vulnerability for which a CVE identifier was assigned before you
+ publish the fix, you can mention it in the commit message, it will
+ help distro maintainers.
- CLEANUP code cleanup, silence of warnings, etc... theoretically no impact.
These patches will rarely be seen in stable branches, though they


@ -0,0 +1,49 @@
commit 72c692701ab4197f1f8ec7594b7e8ef5082b9d9e
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Jul 26 16:40:24 2019 +0200
BUG/MINOR: hlua/htx: Reset channels analyzers when txn:done() is called
For HTX streams, when txn:done() is called, the work is delegated to the
function http_reply_and_close(). But it is not enough. The channel's analyzers
must also be reset. Otherwise, some analyzers may still be called while
processing should be aborted.
For instance, if the function is called from an http-request rule on the
frontend, request analyzers on the backend side are still called. So we may try
to add a header to the request, while it was already reset.
This patch must be backported to 2.0 and 1.9.
(cherry picked from commit fe6a71b8e08234dbe03fbd2fa3017590681479df)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/hlua.c b/src/hlua.c
index 23d2aa04..f9d1d699 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5996,8 +5996,12 @@ __LJMP static int hlua_txn_done(lua_State *L)
ic = &htxn->s->req;
oc = &htxn->s->res;
- if (IS_HTX_STRM(htxn->s))
- htx_reply_and_close(htxn->s, 0, NULL);
+ if (IS_HTX_STRM(htxn->s)) {
+ htxn->s->txn->status = 0;
+ http_reply_and_close(htxn->s, 0, NULL);
+ ic->analysers &= AN_REQ_FLT_END;
+ oc->analysers &= AN_RES_FLT_END;
+ }
else {
if (htxn->s->txn) {
/* HTTP mode, let's stay in sync with the stream */
@@ -6031,6 +6035,9 @@ __LJMP static int hlua_txn_done(lua_State *L)
ic->analysers = 0;
}
+ if (!(htxn->s->flags & SF_ERR_MASK)) // this is not really an error but it is
+ htxn->s->flags |= SF_ERR_LOCAL; // to mark that it comes from the proxy
+
hlua->flags |= HLUA_STOP;
WILL_LJMP(hlua_done(L));
return 0;


@ -0,0 +1,201 @@
commit dc2ee27c7a1908ca3157a10ad131f13644bcaea3
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Jul 26 16:17:01 2019 +0200
BUG/MEDIUM: hlua: Check the calling direction in lua functions of the HTTP class
It is invalid to manipulate responses from http-request rules or to manipulate
requests from http-response rules. When http-request rules are evaluated, the
connection to the server is not yet established, so there is no response at all. And
when http-response rules are evaluated, the request has already been sent to the
server.
Now, the calling direction is checked. So functions "txn.http:req_*" can now
only be called from http-request rules and the functions "txn.http:res_*" can
only be called from http-response rules.
This issue was reported on Github (#190).
This patch must be backported to all versions since the 1.6.
(cherry picked from commit 84a6d5bc217a418db8efc4e76a0a32860db2c608)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/hlua.c b/src/hlua.c
index f9d1d699..21351cd6 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5346,6 +5346,9 @@ __LJMP static int hlua_http_req_get_headers(lua_State *L)
MAY_LJMP(check_args(L, 1, "req_get_headers"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
return hlua_http_get_headers(L, htxn, &htxn->s->txn->req);
}
@@ -5356,6 +5359,9 @@ __LJMP static int hlua_http_res_get_headers(lua_State *L)
MAY_LJMP(check_args(L, 1, "res_get_headers"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
return hlua_http_get_headers(L, htxn, &htxn->s->txn->rsp);
}
@@ -5393,6 +5399,9 @@ __LJMP static int hlua_http_req_rep_hdr(lua_State *L)
MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_HDR));
}
@@ -5403,6 +5412,9 @@ __LJMP static int hlua_http_res_rep_hdr(lua_State *L)
MAY_LJMP(check_args(L, 4, "res_rep_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_HDR));
}
@@ -5413,6 +5425,9 @@ __LJMP static int hlua_http_req_rep_val(lua_State *L)
MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_VAL));
}
@@ -5423,6 +5438,9 @@ __LJMP static int hlua_http_res_rep_val(lua_State *L)
MAY_LJMP(check_args(L, 4, "res_rep_val"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_VAL));
}
@@ -5462,6 +5480,9 @@ __LJMP static int hlua_http_req_del_hdr(lua_State *L)
MAY_LJMP(check_args(L, 2, "req_del_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
return hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
}
@@ -5469,9 +5490,12 @@ __LJMP static int hlua_http_res_del_hdr(lua_State *L)
{
struct hlua_txn *htxn;
- MAY_LJMP(check_args(L, 2, "req_del_hdr"));
+ MAY_LJMP(check_args(L, 2, "res_del_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
return hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
}
@@ -5523,6 +5547,9 @@ __LJMP static int hlua_http_req_add_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "req_add_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req);
}
@@ -5533,6 +5560,9 @@ __LJMP static int hlua_http_res_add_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "res_add_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp);
}
@@ -5543,6 +5573,9 @@ static int hlua_http_req_set_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "req_set_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req);
}
@@ -5554,6 +5587,9 @@ static int hlua_http_res_set_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "res_set_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp);
}
@@ -5565,6 +5601,9 @@ static int hlua_http_req_set_meth(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
lua_pushboolean(L, http_replace_req_line(0, name, name_len, htxn->p, htxn->s) != -1);
return 1;
}
@@ -5576,6 +5615,9 @@ static int hlua_http_req_set_path(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
lua_pushboolean(L, http_replace_req_line(1, name, name_len, htxn->p, htxn->s) != -1);
return 1;
}
@@ -5587,6 +5629,9 @@ static int hlua_http_req_set_query(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
/* Check length. */
if (name_len > trash.size - 1) {
lua_pushboolean(L, 0);
@@ -5611,6 +5656,9 @@ static int hlua_http_req_set_uri(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+ if (htxn->dir != SMP_OPT_DIR_REQ)
+ WILL_LJMP(lua_error(L));
+
lua_pushboolean(L, http_replace_req_line(3, name, name_len, htxn->p, htxn->s) != -1);
return 1;
}
@@ -5622,6 +5670,9 @@ static int hlua_http_res_set_status(lua_State *L)
unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2));
const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
+ if (htxn->dir != SMP_OPT_DIR_RES)
+ WILL_LJMP(lua_error(L));
+
http_set_status(code, reason, htxn->s);
return 0;
}
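The repeated guard added above boils down to one rule: a request-side helper must refuse to run while the transaction is being processed in the response direction, and vice versa. A minimal standalone illustration of that guard (plain C with hypothetical names; the real Lua binding raises an error instead of returning -1):

#include <stdio.h>

enum dir { DIR_REQ, DIR_RES };

struct txn { enum dir dir; };

static int req_set_header(struct txn *t, const char *name, const char *value)
{
    if (t->dir != DIR_REQ) {
        /* wrong calling context: no request message to touch */
        fprintf(stderr, "req_set_header() called outside request processing\n");
        return -1;
    }
    printf("set %s: %s on the request\n", name, value);
    return 0;
}

int main(void)
{
    struct txn req_txn = { DIR_REQ };
    struct txn res_txn = { DIR_RES };

    req_set_header(&req_txn, "x-demo", "ok");        /* allowed */
    req_set_header(&res_txn, "x-demo", "refused");   /* rejected: wrong direction */
    return 0;
}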


@ -0,0 +1,34 @@
commit b22f6501bc9838061472128360e0e55d08cb0bd9
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Jul 26 14:54:52 2019 +0200
MINOR: hlua: Don't set request analyzers on response channel for lua actions
Setting some request analyzers on the response channel was an old trick to be
sure to re-evaluate the request's analyzers after the response's ones have been
called. It is no longer necessary. In fact, this trick was removed in version
1.8 and backported up to version 1.6.
This patch must be backported to all versions since 1.6 to ease the backports of
fixes on the lua code.
(cherry picked from commit 51fa358432247fe5d7259d9d8a0e08d49d429c73)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/hlua.c b/src/hlua.c
index 21351cd6..36454cdc 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -6873,11 +6873,8 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
* is detected on a response channel. This is useful
* only for actions targeted on the requests.
*/
- if (HLUA_IS_WAKERESWR(s->hlua)) {
+ if (HLUA_IS_WAKERESWR(s->hlua))
s->res.flags |= CF_WAKE_WRITE;
- if ((analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE)))
- s->res.analysers |= analyzer;
- }
if (HLUA_IS_WAKEREQWR(s->hlua))
s->req.flags |= CF_WAKE_WRITE;
/* We can quit the function without consistency check


@ -0,0 +1,110 @@
commit ff96b8bd3f85155f65b2b9c9f046fe3e40f630a4
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Jul 26 15:09:53 2019 +0200
MINOR: hlua: Add a flag on the lua txn to know in which context it can be used
When a lua action or a lua sample fetch is called, a lua transaction is
created. It is an entry in the stack containing the class TXN. Thanks to it, we
can know the direction (request or response) of the call. But, for some
functions, it is also necessary to know if the buffer is "HTTP ready" for the
given direction. "HTTP ready" means there is a valid HTTP message in the
channel's buffer. So, when a lua action or a lua sample fetch is called, the
flag HLUA_TXN_HTTP_RDY is set if it is appropriate.
(cherry picked from commit bfab2dddad3ded87617d1e2db54761943d1eb32d)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/include/types/hlua.h b/include/types/hlua.h
index 70c76852..2f4e38be 100644
--- a/include/types/hlua.h
+++ b/include/types/hlua.h
@@ -43,7 +43,8 @@ struct stream;
#define HLUA_F_AS_STRING 0x01
#define HLUA_F_MAY_USE_HTTP 0x02
-#define HLUA_TXN_NOTERM 0x00000001
+#define HLUA_TXN_NOTERM 0x00000001
+#define HLUA_TXN_HTTP_RDY 0x00000002 /* Set if the txn is HTTP ready for the defined direction */
#define HLUA_CONCAT_BLOCSZ 2048
diff --git a/src/hlua.c b/src/hlua.c
index 36454cdc..d37e3c61 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -6494,6 +6494,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
struct stream *stream = smp->strm;
const char *error;
const struct buffer msg = { };
+ unsigned int hflags = HLUA_TXN_NOTERM;
if (!stream)
return 0;
@@ -6517,6 +6518,13 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
consistency_set(stream, smp->opt, &stream->hlua->cons);
+ if (stream->be->mode == PR_MODE_HTTP) {
+ if ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ)
+ hflags |= ((stream->txn->req.msg_state < HTTP_MSG_BODY) ? 0 : HLUA_TXN_HTTP_RDY);
+ else
+ hflags |= ((stream->txn->rsp.msg_state < HTTP_MSG_BODY) ? 0 : HLUA_TXN_HTTP_RDY);
+ }
+
/* If it is the first run, initialize the data for the call. */
if (!HLUA_IS_RUNNING(stream->hlua)) {
@@ -6541,8 +6549,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
lua_rawgeti(stream->hlua->T, LUA_REGISTRYINDEX, fcn->function_ref);
/* push arguments in the stack. */
- if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR,
- HLUA_TXN_NOTERM)) {
+ if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR, hflags)) {
SEND_ERR(smp->px, "Lua sample-fetch '%s': full stack.\n", fcn->name);
RESET_SAFE_LJMP(stream->hlua->T);
return 0;
@@ -6759,16 +6766,16 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
struct session *sess, struct stream *s, int flags)
{
char **arg;
- unsigned int analyzer;
+ unsigned int hflags = 0;
int dir;
const char *error;
const struct buffer msg = { };
switch (rule->from) {
- case ACT_F_TCP_REQ_CNT: analyzer = AN_REQ_INSPECT_FE ; dir = SMP_OPT_DIR_REQ; break;
- case ACT_F_TCP_RES_CNT: analyzer = AN_RES_INSPECT ; dir = SMP_OPT_DIR_RES; break;
- case ACT_F_HTTP_REQ: analyzer = AN_REQ_HTTP_PROCESS_FE; dir = SMP_OPT_DIR_REQ; break;
- case ACT_F_HTTP_RES: analyzer = AN_RES_HTTP_PROCESS_BE; dir = SMP_OPT_DIR_RES; break;
+ case ACT_F_TCP_REQ_CNT: ; dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_TCP_RES_CNT: ; dir = SMP_OPT_DIR_RES; break;
+ case ACT_F_HTTP_REQ: hflags = HLUA_TXN_HTTP_RDY ; dir = SMP_OPT_DIR_REQ; break;
+ case ACT_F_HTTP_RES: hflags = HLUA_TXN_HTTP_RDY ; dir = SMP_OPT_DIR_RES; break;
default:
SEND_ERR(px, "Lua: internal error while execute action.\n");
return ACT_RET_CONT;
@@ -6821,7 +6828,7 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
lua_rawgeti(s->hlua->T, LUA_REGISTRYINDEX, rule->arg.hlua_rule->fcn.function_ref);
/* Create and and push object stream in the stack. */
- if (!hlua_txn_new(s->hlua->T, s, px, dir, 0)) {
+ if (!hlua_txn_new(s->hlua->T, s, px, dir, hflags)) {
SEND_ERR(px, "Lua function '%s': full stack.\n",
rule->arg.hlua_rule->fcn.name);
RESET_SAFE_LJMP(s->hlua->T);
@@ -6864,9 +6871,9 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
case HLUA_E_AGAIN:
/* Set timeout in the required channel. */
if (s->hlua->wake_time != TICK_ETERNITY) {
- if (analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE))
+ if (dir & SMP_OPT_DIR_REQ)
s->req.analyse_exp = s->hlua->wake_time;
- else if (analyzer & (AN_RES_INSPECT|AN_RES_HTTP_PROCESS_BE))
+ else
s->res.analyse_exp = s->hlua->wake_time;
}
/* Some actions can be wake up when a "write" event


@ -0,0 +1,180 @@
commit 2351ca211d655c1be9ef6d62880899102134266d
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Fri Jul 26 16:31:34 2019 +0200
BUG/MINOR: hlua: Only execute functions of HTTP class if the txn is HTTP ready
The flag HLUA_TXN_HTTP_RDY was added in the previous commit to know when a
function is called for a channel with a valid HTTP message or not. Of course it
also depends on the calling direction. In this commit, we allow the execution of
functions of the HTTP class only if this flag is set.
Nobody seems to use them from an unsupported context (for instance, trying to
set an HTTP header from a tcp-request rule). But it remains a bug leading to
undefined behaviors or crashes.
This patch may be backported to all versions since the 1.6. It depends on the
commits "MINOR: hlua: Add a flag on the lua txn to know in which context it can
be used" and "MINOR: hlua: Don't set request analyzers on response channel for
lua actions".
(cherry picked from commit 301eff8e215d5dc7130e1ebacd7cf8da09a4f643)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/hlua.c b/src/hlua.c
index d37e3c61..4d92fa44 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -5346,7 +5346,7 @@ __LJMP static int hlua_http_req_get_headers(lua_State *L)
MAY_LJMP(check_args(L, 1, "req_get_headers"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return hlua_http_get_headers(L, htxn, &htxn->s->txn->req);
@@ -5359,7 +5359,7 @@ __LJMP static int hlua_http_res_get_headers(lua_State *L)
MAY_LJMP(check_args(L, 1, "res_get_headers"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return hlua_http_get_headers(L, htxn, &htxn->s->txn->rsp);
@@ -5399,7 +5399,7 @@ __LJMP static int hlua_http_req_rep_hdr(lua_State *L)
MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_HDR));
@@ -5412,7 +5412,7 @@ __LJMP static int hlua_http_res_rep_hdr(lua_State *L)
MAY_LJMP(check_args(L, 4, "res_rep_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_HDR));
@@ -5425,7 +5425,7 @@ __LJMP static int hlua_http_req_rep_val(lua_State *L)
MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_VAL));
@@ -5438,7 +5438,7 @@ __LJMP static int hlua_http_res_rep_val(lua_State *L)
MAY_LJMP(check_args(L, 4, "res_rep_val"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_VAL));
@@ -5480,7 +5480,7 @@ __LJMP static int hlua_http_req_del_hdr(lua_State *L)
MAY_LJMP(check_args(L, 2, "req_del_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
@@ -5493,7 +5493,7 @@ __LJMP static int hlua_http_res_del_hdr(lua_State *L)
MAY_LJMP(check_args(L, 2, "res_del_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
@@ -5547,7 +5547,7 @@ __LJMP static int hlua_http_req_add_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "req_add_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req);
@@ -5560,7 +5560,7 @@ __LJMP static int hlua_http_res_add_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "res_add_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp);
@@ -5573,7 +5573,7 @@ static int hlua_http_req_set_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "req_set_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
@@ -5587,7 +5587,7 @@ static int hlua_http_res_set_hdr(lua_State *L)
MAY_LJMP(check_args(L, 3, "res_set_hdr"));
htxn = MAY_LJMP(hlua_checkhttp(L, 1));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
@@ -5601,7 +5601,7 @@ static int hlua_http_req_set_meth(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
lua_pushboolean(L, http_replace_req_line(0, name, name_len, htxn->p, htxn->s) != -1);
@@ -5615,7 +5615,7 @@ static int hlua_http_req_set_path(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
lua_pushboolean(L, http_replace_req_line(1, name, name_len, htxn->p, htxn->s) != -1);
@@ -5629,7 +5629,7 @@ static int hlua_http_req_set_query(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
/* Check length. */
@@ -5656,7 +5656,7 @@ static int hlua_http_req_set_uri(lua_State *L)
size_t name_len;
const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
- if (htxn->dir != SMP_OPT_DIR_REQ)
+ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
lua_pushboolean(L, http_replace_req_line(3, name, name_len, htxn->p, htxn->s) != -1);
@@ -5670,7 +5670,7 @@ static int hlua_http_res_set_status(lua_State *L)
unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2));
const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
- if (htxn->dir != SMP_OPT_DIR_RES)
+ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
WILL_LJMP(lua_error(L));
http_set_status(code, reason, htxn->s);


@ -0,0 +1,37 @@
commit 3cd7a1ea5110fc6a92627aaad06553a49723ac92
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Mon Jul 29 10:50:28 2019 +0200
BUG/MINOR: htx: Fix free space addresses calculation during a block expansion
When the payload of a block is shrunk or enlarged, addresses of the free
spaces must be updated. There are many possible cases. One of them is
buggy. When there is only one block in the HTX message and its payload is just
before the tail room and it needs to be moved in the head room to be enlarged,
addresses are not correctly updated. This bug may be hit by the compression
filter.
This patch must be backported to 2.0.
(cherry picked from commit 61ed7797f6440ee1102576365553650b1982a233)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
diff --git a/src/htx.c b/src/htx.c
index c29a66d7..cd21050c 100644
--- a/src/htx.c
+++ b/src/htx.c
@@ -252,11 +252,13 @@ static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32
ret = 1;
}
else if ((sz + delta) < headroom) {
+ uint32_t oldaddr = blk->addr;
+
/* Move the block's payload into the headroom */
blk->addr = htx->head_addr;
htx->tail_addr -= sz;
htx->head_addr += sz + delta;
- if (blk->addr == htx->end_addr) {
+ if (oldaddr == htx->end_addr) {
if (htx->end_addr == htx->tail_addr) {
htx->tail_addr = htx->head_addr;
htx->head_addr = htx->end_addr = 0;


@ -0,0 +1,225 @@
commit 0ff395c154ad827c0c30eefc9371ba7f7c171027
Author: Willy Tarreau <w@1wt.eu>
Date: Tue Jul 30 11:59:34 2019 +0200
BUG/MAJOR: queue/threads: avoid an AB/BA locking issue in process_srv_queue()
A problem involving server slowstart was reported by @max2k1 in issue #197.
The problem is that pendconn_grab_from_px() takes the proxy lock while
already under the server's lock, whereas process_srv_queue() first takes the
proxy's lock and then the server's lock.
While the latter seems more natural, it is fundamentally incompatible with
many other operations performed on servers, namely state change propagation,
where the proxy is only known after the server and cannot be locked around
the servers. However, reversing the lock order in process_srv_queue() is trivial
and only the few functions related to dynamic cookies need to be adjusted
for this so that the proxy's lock is taken for each server operation. This
is possible because the proxy's server list is built once at boot time and
remains stable. So this is what this patch does.
The comments in the proxy and server structs were updated to mention this
rule that the server's lock may not be taken under the proxy's lock but
may enclose it.
Another approach could consist in using a second lock for the proxy's queue
which would be different from the regular proxy's lock, but given that the
operations above are rare and operate on small servers list, there is no
reason for overdesigning a solution.
This fix was successfully tested with 10000 servers in a backend where
adjusting the dyncookies in loops over the CLI didn't have a measurable
impact on the traffic.
The only workaround without the fix is to disable any occurrence of
"slowstart" on server lines, or to disable threads using "nbthread 1".
This must be backported as far as 1.8.
(cherry picked from commit 5e83d996cf965ee5ac625f702a446f4d8c80a220)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/include/types/proxy.h b/include/types/proxy.h
index ca24dbfe..2518f88d 100644
--- a/include/types/proxy.h
+++ b/include/types/proxy.h
@@ -487,7 +487,7 @@ struct proxy {
* name is used
*/
struct list filter_configs; /* list of the filters that are declared on this proxy */
- __decl_hathreads(HA_SPINLOCK_T lock);
+ __decl_hathreads(HA_SPINLOCK_T lock); /* may be taken under the server's lock */
};
struct switching_rule {
diff --git a/include/types/server.h b/include/types/server.h
index 4a077268..e0534162 100644
--- a/include/types/server.h
+++ b/include/types/server.h
@@ -319,7 +319,7 @@ struct server {
} ssl_ctx;
#endif
struct dns_srvrq *srvrq; /* Pointer representing the DNS SRV requeest, if any */
- __decl_hathreads(HA_SPINLOCK_T lock);
+ __decl_hathreads(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
struct {
const char *file; /* file where the section appears */
struct eb32_node id; /* place in the tree of used IDs */
diff --git a/src/proxy.c b/src/proxy.c
index ae761ead..a537e0b1 100644
--- a/src/proxy.c
+++ b/src/proxy.c
@@ -1940,9 +1940,12 @@ static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct
if (!px)
return 1;
+ /* Note: this lock is to make sure this doesn't change while another
+ * thread is in srv_set_dyncookie().
+ */
HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
-
px->ck_opts |= PR_CK_DYNAMIC;
+ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
for (s = px->srv; s != NULL; s = s->next) {
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
@@ -1950,8 +1953,6 @@ static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
}
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
-
return 1;
}
@@ -1971,9 +1972,12 @@ static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struc
if (!px)
return 1;
+ /* Note: this lock is to make sure this doesn't change while another
+ * thread is in srv_set_dyncookie().
+ */
HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
-
px->ck_opts &= ~PR_CK_DYNAMIC;
+ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
for (s = px->srv; s != NULL; s = s->next) {
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
@@ -1984,8 +1988,6 @@ static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struc
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
}
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
-
return 1;
}
@@ -2021,10 +2023,13 @@ static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struc
return 1;
}
+ /* Note: this lock is to make sure this doesn't change while another
+ * thread is in srv_set_dyncookie().
+ */
HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
-
free(px->dyncookie_key);
px->dyncookie_key = newkey;
+ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
for (s = px->srv; s != NULL; s = s->next) {
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
@@ -2032,8 +2037,6 @@ static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struc
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
}
- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
-
return 1;
}
diff --git a/src/queue.c b/src/queue.c
index f4a94530..6aa54170 100644
--- a/src/queue.c
+++ b/src/queue.c
@@ -312,16 +312,16 @@ void process_srv_queue(struct server *s)
struct proxy *p = s->proxy;
int maxconn;
- HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+ HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
maxconn = srv_dynamic_maxconn(s);
while (s->served < maxconn) {
int ret = pendconn_process_next_strm(s, p);
if (!ret)
break;
}
- HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
+ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
}
/* Adds the stream <strm> to the pending connection queue of server <strm>->srv
@@ -424,7 +424,8 @@ int pendconn_redistribute(struct server *s)
/* Check for pending connections at the backend, and assign some of them to
* the server coming up. The server's weight is checked before being assigned
* connections it may not be able to handle. The total number of transferred
- * connections is returned.
+ * connections is returned. It must be called with the server lock held, and
+ * will take the proxy's lock.
*/
int pendconn_grab_from_px(struct server *s)
{
diff --git a/src/server.c b/src/server.c
index a96f1ef6..236d6bae 100644
--- a/src/server.c
+++ b/src/server.c
@@ -125,7 +125,7 @@ static inline void srv_check_for_dup_dyncookie(struct server *s)
}
/*
- * Must be called with the server lock held.
+ * Must be called with the server lock held, and will grab the proxy lock.
*/
void srv_set_dyncookie(struct server *s)
{
@@ -137,15 +137,17 @@ void srv_set_dyncookie(struct server *s)
int addr_len;
int port;
+ HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
+
if ((s->flags & SRV_F_COOKIESET) ||
!(s->proxy->ck_opts & PR_CK_DYNAMIC) ||
s->proxy->dyncookie_key == NULL)
- return;
+ goto out;
key_len = strlen(p->dyncookie_key);
if (s->addr.ss_family != AF_INET &&
s->addr.ss_family != AF_INET6)
- return;
+ goto out;
/*
* Buffer to calculate the cookie value.
* The buffer contains the secret key + the server IP address
@@ -174,7 +176,7 @@ void srv_set_dyncookie(struct server *s)
hash_value = XXH64(tmpbuf, buffer_len, 0);
memprintf(&s->cookie, "%016llx", hash_value);
if (!s->cookie)
- return;
+ goto out;
s->cklen = 16;
/* Don't bother checking if the dyncookie is duplicated if
@@ -183,6 +185,8 @@ void srv_set_dyncookie(struct server *s)
*/
if (!(s->next_admin & SRV_ADMF_FMAINT))
srv_check_for_dup_dyncookie(s);
+ out:
+ HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
}
/*
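The deadlock pattern fixed here is the classic AB/BA inversion: one path locks the proxy then the server while another locks the server then the proxy. The cure is to impose one order everywhere, server first and proxy second. A standalone sketch of the resulting discipline (POSIX threads, structures reduced to the bare minimum, names only loosely modeled on HAProxy's):

#include <pthread.h>
#include <stdio.h>

struct proxy  { pthread_mutex_t lock; int queued; };
struct server { pthread_mutex_t lock; struct proxy *proxy; int served; };

static void process_srv_queue(struct server *s)
{
    struct proxy *p = s->proxy;

    pthread_mutex_lock(&s->lock);          /* server first ... */
    pthread_mutex_lock(&p->lock);          /* ... then proxy, same order everywhere */
    while (s->served < 10 && p->queued > 0) {
        p->queued--;
        s->served++;
    }
    pthread_mutex_unlock(&p->lock);
    pthread_mutex_unlock(&s->lock);
}

static void grab_from_px(struct server *s)
{
    /* In the real code the caller already holds the server lock; here both
     * locks are taken in the same server-then-proxy order to stay safe. */
    pthread_mutex_lock(&s->lock);
    pthread_mutex_lock(&s->proxy->lock);
    s->served += s->proxy->queued;
    s->proxy->queued = 0;
    pthread_mutex_unlock(&s->proxy->lock);
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    struct proxy  px = { PTHREAD_MUTEX_INITIALIZER, 5 };
    struct server sv = { PTHREAD_MUTEX_INITIALIZER, &px, 0 };

    process_srv_queue(&sv);
    grab_from_px(&sv);
    printf("served=%d queued=%d\n", sv.served, px.queued);
    return 0;
}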


@ -0,0 +1,71 @@
commit da767eaaf6128eccd349a54ec6eac2a68dcacacb
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Jul 31 19:15:45 2019 +0200
BUG/MINOR: debug: fix a small race in the thread dumping code
If a thread dump is requested from a signal handler, it may interrupt
a thread already waiting for a dump to complete, and may see the
threads_to_dump variable go to zero while others are waiting, steal
the lock and prevent other threads from ever completing. This tends
to happen when dumping many threads upon a watchdog timeout, to threads
waiting for their turn.
Instead now we proceed in two steps:
1) the last dumped thread sets all bits again
2) all threads only wait for their own bit to appear, then clear it
and quit
This way there's no risk that a bit performs a double flip in the same
loop and threads cannot get stuck here anymore.
This should be backported to 2.0 as it clarifies stack traces.
(cherry picked from commit c07736209db764fb2aef6f18ed3687a504c35771)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/debug.c b/src/debug.c
index 059bc6b9..07624ca5 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -440,8 +440,8 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
* 1- wait for our turn, i.e. when all lower bits are gone.
* 2- perform the action if our bit is set
* 3- remove our bit to let the next one go, unless we're
- * the last one and have to put them all but ours
- * 4- wait for zero and clear our bit if it's set
+ * the last one and have to put them all as a signal
+ * 4- wait out bit to re-appear, then clear it and quit.
*/
/* wait for all previous threads to finish first */
@@ -454,7 +454,7 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
ha_thread_dump(thread_dump_buffer, tid, thread_dump_tid);
if ((threads_to_dump & all_threads_mask) == tid_bit) {
/* last one */
- HA_ATOMIC_STORE(&threads_to_dump, all_threads_mask & ~tid_bit);
+ HA_ATOMIC_STORE(&threads_to_dump, all_threads_mask);
thread_dump_buffer = NULL;
}
else
@@ -462,14 +462,13 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
}
/* now wait for all others to finish dumping. The last one will set all
- * bits again to broadcast the leaving condition.
+ * bits again to broadcast the leaving condition so we'll see ourselves
+ * present again. This way the threads_to_dump variable never passes to
+ * zero until all visitors have stopped waiting.
*/
- while (threads_to_dump & all_threads_mask) {
- if (threads_to_dump & tid_bit)
- HA_ATOMIC_AND(&threads_to_dump, ~tid_bit);
- else
- ha_thread_relax();
- }
+ while (!(threads_to_dump & tid_bit))
+ ha_thread_relax();
+ HA_ATOMIC_AND(&threads_to_dump, ~tid_bit);
/* mark the current thread as stuck to detect it upon next invocation
* if it didn't move.

View File

@ -0,0 +1,70 @@
commit 445b2b7c52a13678241a190c4ff52e77a09ef0a6
Author: Willy Tarreau <w@1wt.eu>
Date: Wed Jul 31 19:20:39 2019 +0200
MINOR: wdt: also consider that waiting in the thread dumper is normal
It happens that upon looping threads the watchdog fires, starts a dump,
and other threads expire their budget while waiting for the other threads
to get dumped and trigger a watchdog event again, adding some confusion
to the traces. With this patch the situation becomes clearer as we export
the list of threads being dumped so that the watchdog can check it before
deciding to trigger. This way, threads that are merely queued for dumping are
not themselves reported by the watchdog in turn.
This should be backported to 2.0 as it helps understand stack traces.
(cherry picked from commit a37cb1880c81b1f038e575d88ba7210aea0b7b8f)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/include/common/debug.h b/include/common/debug.h
index 333203dd..f43258e9 100644
--- a/include/common/debug.h
+++ b/include/common/debug.h
@@ -70,6 +70,7 @@
struct task;
struct buffer;
+extern volatile unsigned long threads_to_dump;
void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx);
void ha_thread_dump(struct buffer *buf, int thr, int calling_tid);
void ha_thread_dump_all_to_trash();
diff --git a/src/debug.c b/src/debug.c
index 07624ca5..3077e97c 100644
--- a/src/debug.c
+++ b/src/debug.c
@@ -29,6 +29,11 @@
#include <proto/stream_interface.h>
#include <proto/task.h>
+/* mask of threads still having to dump, used to respect ordering. Only used
+ * when USE_THREAD_DUMP is set.
+ */
+volatile unsigned long threads_to_dump = 0;
+
/* Dumps to the buffer some known information for the desired thread, and
* optionally extra info for the current thread. The dump will be appended to
* the buffer, so the caller is responsible for preliminary initializing it.
@@ -405,9 +410,6 @@ void ha_thread_dump_all_to_trash()
*/
#define DEBUGSIG SIGURG
-/* mask of threads still having to dump, used to respect ordering */
-static volatile unsigned long threads_to_dump;
-
/* ID of the thread requesting the dump */
static unsigned int thread_dump_tid;
diff --git a/src/wdt.c b/src/wdt.c
index 19d36c34..aa89fd44 100644
--- a/src/wdt.c
+++ b/src/wdt.c
@@ -75,7 +75,7 @@ void wdt_handler(int sig, siginfo_t *si, void *arg)
if (n - p < 1000000000UL)
goto update_and_leave;
- if ((threads_harmless_mask|sleeping_thread_mask) & (1UL << thr)) {
+ if ((threads_harmless_mask|sleeping_thread_mask|threads_to_dump) & (1UL << thr)) {
/* This thread is currently doing exactly nothing
* waiting in the poll loop (unlikely but possible),
* waiting for all other threads to join the rendez-vous

View File

@ -0,0 +1,56 @@
commit 0fc2d46fabb2b9317daf7030162e828c7e1684d5
Author: Christopher Faulet <cfaulet@haproxy.com>
Date: Thu Aug 1 10:09:29 2019 +0200
BUG/MEDIUM: lb-chash: Ensure the tree integrity when server weight is increased
When the server weight is increased in consistent hash, extra nodes have to be
allocated, so a realloc() is performed on the server's nodes array. The
previous commit 962ea7732 ("BUG/MEDIUM: lb-chash: Remove all server's entries
before realloc() to re-insert them after") fixed the size used during the
realloc() to avoid segfaults. But another bug remains: after the realloc(), the
memory area allocated for the nodes array may change, invalidating all node
addresses in the chash tree.
So, to fix the bug, we must remove all of the server's entries from the chash
tree before the realloc() and re-insert all of them afterwards, old nodes and
new ones alike. The insertion is handled automatically by the loop at the end
of chash_queue_dequeue_srv().
Note that if the call to realloc() fails, no new entries are created for the
server, so the effective server weight remains unchanged.
This issue was reported on Github (#189).
This patch must be backported to all versions since 1.6.
(cherry picked from commit 0a52c17f819a5b0a17718b605bdd990b9e2b58e6)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
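The hazard is generic to any intrusive structure whose nodes live inside a
realloc()ed array. The standalone sketch below (plain C, an intrusive
singly-linked list standing in for HAProxy's ebtree, all names hypothetical)
shows the detach / realloc / re-insert pattern the fix applies:

#include <stdio.h>
#include <stdlib.h>

struct occ {
	struct occ *next;	/* intrusive link (an eb32_node in HAProxy) */
	unsigned key;
};

static struct occ *head;	/* stands in for the server's chash tree */

/* grow the backing array from old_n to new_n entries */
static struct occ *grow(struct occ *arr, unsigned old_n, unsigned new_n)
{
	struct occ *new_arr;
	unsigned i;

	head = NULL;				/* 1) detach every entry first */

	new_arr = realloc(arr, new_n * sizeof(*new_arr));
	if (!new_arr) {				/* failed: keep the old array,  */
		new_arr = arr;			/*    effective size unchanged  */
		new_n = old_n;
	}

	for (i = old_n; i < new_n; i++)		/* init the freshly added slots */
		new_arr[i].key = i;

	for (i = 0; i < new_n; i++) {		/* 2) re-insert old + new nodes */
		new_arr[i].next = head;		/*    at their (possibly moved) */
		head = &new_arr[i];		/*    new addresses             */
	}
	return new_arr;
}

int main(void)
{
	struct occ *arr = grow(NULL, 0, 4);	/* initial allocation: 4 nodes */
	struct occ *o;

	if (arr)
		arr = grow(arr, 4, 8);		/* "weight increase": 4 -> 8   */
	for (o = head; o; o = o->next)
		printf("key=%u\n", o->key);
	free(arr);
	return 0;
}

Re-inserting only after the realloc() is what guarantees the list (or tree)
never holds pointers into a freed copy of the array.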
diff --git a/src/lb_chash.c b/src/lb_chash.c
index 0bf4e81a..23448df8 100644
--- a/src/lb_chash.c
+++ b/src/lb_chash.c
@@ -84,8 +84,13 @@ static inline void chash_queue_dequeue_srv(struct server *s)
* increased the weight beyond the original weight
*/
if (s->lb_nodes_tot < s->next_eweight) {
- struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
+ struct tree_occ *new_nodes;
+ /* First we need to remove all server's entries from its tree
+ * because the realloc will change all nodes pointers */
+ chash_dequeue_srv(s);
+
+ new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
if (new_nodes) {
unsigned int j;
@@ -494,7 +499,6 @@ void chash_init_server_tree(struct proxy *p)
srv->lb_nodes_tot = srv->uweight * BE_WEIGHT_SCALE;
srv->lb_nodes_now = 0;
srv->lb_nodes = calloc(srv->lb_nodes_tot, sizeof(struct tree_occ));
-
for (node = 0; node < srv->lb_nodes_tot; node++) {
srv->lb_nodes[node].server = srv;
srv->lb_nodes[node].node.key = full_hash(srv->puid * SRV_EWGHT_RANGE + node);

View File

@ -0,0 +1,71 @@
commit c0968f59b723dfa9effa63ac28b59642b11c6b8b
Author: Richard Russo <russor@whatsapp.com>
Date: Wed Jul 31 11:45:56 2019 -0700
BUG/MAJOR: http/sample: use a static buffer for raw -> htx conversion
Multiple calls to smp_fetch_fhdr use the header context to keep track of
header parsing position; however, when using header sampling on a raw
connection, the raw buffer is converted into an HTX structure each time, and
this was done in the trash areas; so the block reference would be invalid on
subsequent calls.
This patch must be backported to 2.0 and 1.9.
(cherry picked from commit 458eafb36df88932a02d1ce7ca31832abf11b8b3)
Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
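The idea is simply to give each thread one dedicated scratch buffer whose
contents survive across successive fetch calls, instead of building the
converted data in the constantly recycled trash area. A rough sketch of that
pattern (C11 _Thread_local, hypothetical names and size, not HAProxy's
per-thread allocator hooks):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SCRATCH_SIZE 16384		/* stand-in for global.tune.bufsize */

/* one scratch buffer per thread: whatever a fetch builds in it remains
 * valid for the following fetch calls made on the same thread
 */
static _Thread_local char *raw_scratch;

static char *get_raw_scratch(void)
{
	if (!raw_scratch)
		raw_scratch = malloc(SCRATCH_SIZE);	/* freed at thread exit in real code */
	return raw_scratch;
}

/* convert <raw> once; pointers returned here stay valid across calls */
static const char *convert_raw(const char *raw)
{
	char *buf = get_raw_scratch();

	if (!buf)
		return NULL;
	strncpy(buf, raw, SCRATCH_SIZE - 1);
	buf[SCRATCH_SIZE - 1] = '\0';
	return buf;
}

int main(void)
{
	const char *hdr = convert_raw("Host: example.local");

	printf("%s\n", hdr ? hdr : "(allocation failed)");
	free(raw_scratch);
	return 0;
}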
diff --git a/src/http_fetch.c b/src/http_fetch.c
index 67ea2094..e372a122 100644
--- a/src/http_fetch.c
+++ b/src/http_fetch.c
@@ -46,10 +46,40 @@
/* this struct is used between calls to smp_fetch_hdr() or smp_fetch_cookie() */
static THREAD_LOCAL struct hdr_ctx static_hdr_ctx;
static THREAD_LOCAL struct http_hdr_ctx static_http_hdr_ctx;
+/* this is used to convert raw connection buffers to htx */
+static THREAD_LOCAL struct buffer static_raw_htx_chunk;
+static THREAD_LOCAL char *static_raw_htx_buf;
#define SMP_REQ_CHN(smp) (smp->strm ? &smp->strm->req : NULL)
#define SMP_RES_CHN(smp) (smp->strm ? &smp->strm->res : NULL)
+/* This function returns the static htx chunk, where raw connections get
+ * converted to HTX as needed for sampling.
+ */
+struct buffer *get_raw_htx_chunk(void)
+{
+ chunk_reset(&static_raw_htx_chunk);
+ return &static_raw_htx_chunk;
+}
+
+static int alloc_raw_htx_chunk_per_thread()
+{
+ static_raw_htx_buf = malloc(global.tune.bufsize);
+ if (!static_raw_htx_buf)
+ return 0;
+ chunk_init(&static_raw_htx_chunk, static_raw_htx_buf, global.tune.bufsize);
+ return 1;
+}
+
+static void free_raw_htx_chunk_per_thread()
+{
+ free(static_raw_htx_buf);
+ static_raw_htx_buf = NULL;
+}
+
+REGISTER_PER_THREAD_ALLOC(alloc_raw_htx_chunk_per_thread);
+REGISTER_PER_THREAD_FREE(free_raw_htx_chunk_per_thread);
+
/*
* Returns the data from Authorization header. Function may be called more
* than once so data is stored in txn->auth_data. When no header is found
@@ -265,7 +295,7 @@ struct htx *smp_prefetch_htx(struct sample *smp, struct channel *chn, int vol)
else if (h1m.flags & H1_MF_CLEN)
flags |= HTX_SL_F_CLEN;
- htx = htx_from_buf(get_trash_chunk());
+ htx = htx_from_buf(get_raw_htx_chunk());
sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, h1sl.rq.m, h1sl.rq.u, h1sl.rq.v);
if (!sl || !htx_add_all_headers(htx, hdrs))
return NULL;

View File

@ -0,0 +1,46 @@
commit 7343c710152c586a232a194ef37a56af636d6a56
Author: Willy Tarreau <w@1wt.eu>
Date: Thu Aug 1 18:51:38 2019 +0200
BUG/MINOR: stream-int: also update analysers timeouts on activity
Between 1.6 and 1.7, some parts of the stream forwarding process were
moved into lower layers and the stream-interface had to keep the
stream's task up to date regarding the timeouts. The analyser timeouts
were not updated there as it was believed this was not needed during
forwarding, but there is in fact a case for it: "option contstats", which
periodically triggers the analyser timeout. That change broke the option
under sustained traffic (if some I/O activity happens during the same
millisecond as the timeout expires, the update is missed).
This patch simply brings back the analyser expiration updates from
process_stream() to stream_int_notify().
It may be backported as far as 1.7, taking care to adjust the field
names if needed.
(cherry picked from commit 45bcb37f0f8fa1e16dd9358a59dc280a38834dcd)
Signed-off-by: Willy Tarreau <w@1wt.eu>
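The mechanism is simply to keep the task's expiration at the earliest of every
pending deadline. A simplified, self-contained illustration (0 meaning "no
deadline", loosely mirroring HAProxy's TICK_ETERNITY convention; this is not
the real ticks.h code):

#include <stdio.h>

/* earliest of two deadlines, where 0 means "none" */
static unsigned tick_first(unsigned t1, unsigned t2)
{
	if (!t1)
		return t2;
	if (!t2)
		return t1;
	return t1 < t2 ? t1 : t2;
}

int main(void)
{
	unsigned expire = 0;			/* task expiration being rebuilt     */
	unsigned rex = 5000, wex = 0;		/* read/write timeouts               */
	unsigned analyse_exp = 1200;		/* analyser timeout (e.g. contstats) */

	expire = tick_first(expire, tick_first(rex, wex));
	expire = tick_first(expire, analyse_exp);	/* the step the fix restores */
	printf("task expires at %u\n", expire);		/* -> 1200, not 5000         */
	return 0;
}

Without folding analyse_exp in, the task would only wake at the I/O timeout,
and the periodic analyser deadline could be missed, which is exactly the
contstats symptom described above.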
diff --git a/src/stream_interface.c b/src/stream_interface.c
index 9b9a8e9f..7d89cc90 100644
--- a/src/stream_interface.c
+++ b/src/stream_interface.c
@@ -558,6 +558,16 @@ static void stream_int_notify(struct stream_interface *si)
task->expire = tick_first((tick_is_expired(task->expire, now_ms) ? 0 : task->expire),
tick_first(tick_first(ic->rex, ic->wex),
tick_first(oc->rex, oc->wex)));
+
+ task->expire = tick_first(task->expire, ic->analyse_exp);
+ task->expire = tick_first(task->expire, oc->analyse_exp);
+
+ if (si->exp)
+ task->expire = tick_first(task->expire, si->exp);
+
+ if (sio->exp)
+ task->expire = tick_first(task->expire, sio->exp);
+
task_queue(task);
}
if (ic->flags & CF_READ_ACTIVITY)

View File

@ -0,0 +1,37 @@
commit a8fcdacb8cc0dddec72b1ddc4d9afc92d3684acd
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Aug 2 07:48:47 2019 +0200
BUG/MEDIUM: mux-h2: unbreak receipt of large DATA frames
Recent optimization in commit 4d7a88482 ("MEDIUM: mux-h2: don't try to
read more than needed") broke the receipt of large DATA frames because
it would unconditionally subscribe if there was some room left, thus
preventing any new rx from being done since subscription may only be
done once the end was reached, as indicated by ret == 0.
However, fixing this uncovered that in HTX mode previous versions might
occasionally be affected as well: when an available frame is exactly the
size of the maximum data that may fit into an HTX buffer, we may end up
reading that whole frame and still subscribe since receiving is still
allowed, thus causing issues when reading the next frame.
This patch will only work for 2.1-dev but a minor adaptation will be
needed for earlier versions (down to 1.9, where subscribe() was added).
(cherry picked from commit 9bc1c95855b9c6300de5ecf3720cbe4b2558c5a1)
Signed-off-by: Willy Tarreau <w@1wt.eu>
diff --git a/src/mux_h2.c b/src/mux_h2.c
index 5bb85181..d605fe94 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -2766,7 +2766,7 @@ static int h2_recv(struct h2c *h2c)
ret = 0;
} while (ret > 0);
- if (h2_recv_allowed(h2c) && (b_data(buf) < buf->size))
+ if (max && !ret && h2_recv_allowed(h2c))
conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
if (!b_data(buf)) {

View File

@ -0,0 +1,227 @@
commit 5a9c875f0f1ee83bd5889dd1ad53e9da43e6c34e
Author: Willy Tarreau <w@1wt.eu>
Date: Fri Aug 2 07:52:08 2019 +0200
BUG/MEDIUM: mux-h2: split the stream's and connection's window sizes
The SETTINGS frame parser updates all streams' window for each
INITIAL_WINDOW_SIZE setting received on the connection (like h2spec
does in test 6.5.3), which can start to be expensive if repeated when
there are many streams (up to 100 by default). A quick test shows that
it's possible to parse only 35000 settings per second on a 3 GHz core
for 100 streams, which is rather small.
Given that window sizes are relative and may be negative, there's no
point in pre-initializing them for each stream and updating them from
the settings. Instead, let's make them relative to the connection's
initial window size so that any change immediately affects all streams.
The only thing that remains needed is to wake up the streams that were
unblocked by the update, which is now done once at the end of
h2_process_demux() instead of once per setting. This now results in
5.7 million settings being processed per second, which is way better.
In order to keep the change small, the h2s' mws field was renamed to
"sws" for "stream window size", and an h2s_mws() function was added
to add it to the connection's initial window setting and determine the
window size to use when muxing. The h2c_update_all_ws() function was
renamed to h2c_unblock_sfctl() since it's now only used to unblock
previously blocked streams.
This needs to be backported to all versions till 1.8.
(cherry picked from commit 1d4a0f88100daeb17dd0c9470c659b1ec288bc07)
[wt: context adjustment, port to legacy parts]
Signed-off-by: Willy Tarreau <w@1wt.eu>
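To make the arithmetic concrete, here is a small standalone sketch
(hypothetical structures, not the mux code) of the relative-window idea: each
stream stores only its own delta, so a single update of the connection's
initial window immediately moves every stream's effective window:

#include <stdio.h>

struct conn { int miw; };			/* connection's initial window size */
struct strm { struct conn *c; int sws; };	/* per-stream delta, starts at 0    */

/* effective send window of a stream (the h2s_mws() idea) */
static int strm_mws(const struct strm *s)
{
	return s->c->miw + s->sws;
}

int main(void)
{
	struct conn c = { .miw = 65535 };
	struct strm s1 = { &c, 0 }, s2 = { &c, 0 };

	s1.sws -= 60000;	/* s1 already sent 60000 bytes of DATA        */
	s2.sws += 1000;		/* s2 got a WINDOW_UPDATE of 1000             */
	printf("s1=%d s2=%d\n", strm_mws(&s1), strm_mws(&s2));	/* 5535 66535   */

	c.miw = 16384;		/* one SETTINGS INITIAL_WINDOW_SIZE update... */
	printf("s1=%d s2=%d\n", strm_mws(&s1), strm_mws(&s2));	/* -43616 17384 */
	return 0;
}

No per-stream loop is needed when the setting changes; only the streams
blocked on flow control still have to be woken up, which is what
h2c_unblock_sfctl() does once per demux pass.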
diff --git a/src/mux_h2.c b/src/mux_h2.c
index d605fe94..f90e9435 100644
--- a/src/mux_h2.c
+++ b/src/mux_h2.c
@@ -208,7 +208,7 @@ struct h2s {
struct eb32_node by_id; /* place in h2c's streams_by_id */
int32_t id; /* stream ID */
uint32_t flags; /* H2_SF_* */
- int mws; /* mux window size for this stream */
+ int sws; /* stream window size, to be added to the mux's initial window size */
enum h2_err errcode; /* H2 err code (H2_ERR_*) */
enum h2_ss st;
uint16_t status; /* HTTP response status */
@@ -707,6 +707,14 @@ static inline __maybe_unused int h2s_id(const struct h2s *h2s)
return h2s ? h2s->id : 0;
}
+/* returns the sum of the stream's own window size and the mux's initial
+ * window, which together form the stream's effective window size.
+ */
+static inline int h2s_mws(const struct h2s *h2s)
+{
+ return h2s->sws + h2s->h2c->miw;
+}
+
/* returns true of the mux is currently busy as seen from stream <h2s> */
static inline __maybe_unused int h2c_mux_busy(const struct h2c *h2c, const struct h2s *h2s)
{
@@ -945,7 +953,7 @@ static struct h2s *h2s_new(struct h2c *h2c, int id)
LIST_INIT(&h2s->sending_list);
h2s->h2c = h2c;
h2s->cs = NULL;
- h2s->mws = h2c->miw;
+ h2s->sws = 0;
h2s->flags = H2_SF_NONE;
h2s->errcode = H2_ERR_NO_ERROR;
h2s->st = H2_SS_IDLE;
@@ -1543,30 +1551,23 @@ static void h2_wake_some_streams(struct h2c *h2c, int last)
}
}
-/* Increase all streams' outgoing window size by the difference passed in
- * argument. This is needed upon receipt of the settings frame if the initial
- * window size is different. The difference may be negative and the resulting
- * window size as well, for the time it takes to receive some window updates.
+/* Wake up all blocked streams whose window size has become positive after the
+ * mux's initial window was adjusted. This should be done after having processed
+ * SETTINGS frames which have updated the mux's initial window size.
*/
-static void h2c_update_all_ws(struct h2c *h2c, int diff)
+static void h2c_unblock_sfctl(struct h2c *h2c)
{
struct h2s *h2s;
struct eb32_node *node;
- if (!diff)
- return;
-
node = eb32_first(&h2c->streams_by_id);
while (node) {
h2s = container_of(node, struct h2s, by_id);
- h2s->mws += diff;
-
- if (h2s->mws > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
+ if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
h2s->flags &= ~H2_SF_BLK_SFCTL;
if (h2s->send_wait && !LIST_ADDED(&h2s->list))
LIST_ADDQ(&h2c->send_list, &h2s->list);
}
-
node = eb32_next(node);
}
}
@@ -1607,7 +1608,6 @@ static int h2c_handle_settings(struct h2c *h2c)
error = H2_ERR_FLOW_CONTROL_ERROR;
goto fail;
}
- h2c_update_all_ws(h2c, arg - h2c->miw);
h2c->miw = arg;
break;
case H2_SETTINGS_MAX_FRAME_SIZE:
@@ -1869,13 +1869,13 @@ static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
goto strm_err;
}
- if (h2s->mws >= 0 && h2s->mws + inc < 0) {
+ if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
error = H2_ERR_FLOW_CONTROL_ERROR;
goto strm_err;
}
- h2s->mws += inc;
- if (h2s->mws > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
+ h2s->sws += inc;
+ if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
h2s->flags &= ~H2_SF_BLK_SFCTL;
if (h2s->send_wait && !LIST_ADDED(&h2s->list))
LIST_ADDQ(&h2c->send_list, &h2s->list);
@@ -2237,6 +2237,7 @@ static void h2_process_demux(struct h2c *h2c)
struct h2s *h2s = NULL, *tmp_h2s;
struct h2_fh hdr;
unsigned int padlen = 0;
+ int32_t old_iw = h2c->miw;
if (h2c->st0 >= H2_CS_ERROR)
return;
@@ -2625,6 +2626,9 @@ static void h2_process_demux(struct h2c *h2c)
h2s_notify_recv(h2s);
}
+ if (old_iw != h2c->miw)
+ h2c_unblock_sfctl(h2c);
+
h2c_restart_reading(h2c, 0);
}
@@ -4259,8 +4263,8 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf,
if (size > max)
size = max;
- if (size > h2s->mws)
- size = h2s->mws;
+ if (size > h2s_mws(h2s))
+ size = h2s_mws(h2s);
if (size <= 0) {
h2s->flags |= H2_SF_BLK_SFCTL;
@@ -4362,7 +4366,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf,
ofs += size;
total += size;
h1m->curr_len -= size;
- h2s->mws -= size;
+ h2s->sws -= size;
h2c->mws -= size;
if (size && !h1m->curr_len && (h1m->flags & H1_MF_CHNK)) {
@@ -4390,7 +4394,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf,
}
end:
- trace("[%d] sent simple H2 DATA response (sid=%d) = %d bytes out (%u in, st=%s, ep=%u, es=%s, h2cws=%d h2sws=%d) data=%u", h2c->st0, h2s->id, size+9, (unsigned int)total, h1m_state_str(h1m->state), h1m->err_pos, h1m_state_str(h1m->err_state), h2c->mws, h2s->mws, (unsigned int)b_data(buf));
+ trace("[%d] sent simple H2 DATA response (sid=%d) = %d bytes out (%u in, st=%s, ep=%u, es=%s, h2cws=%d h2sws=%d) data=%u", h2c->st0, h2s->id, size+9, (unsigned int)total, h1m_state_str(h1m->state), h1m->err_pos, h1m_state_str(h1m->err_state), h2c->mws, h2s_mws(h2s), (unsigned int)b_data(buf));
return total;
}
@@ -4937,7 +4941,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
*/
if (unlikely(fsize == count &&
htx->used == 1 && type == HTX_BLK_DATA &&
- fsize <= h2s->mws && fsize <= h2c->mws && fsize <= h2c->mfs)) {
+ fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
void *old_area = mbuf->area;
if (b_data(mbuf)) {
@@ -4972,7 +4976,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
h2_set_frame_size(outbuf.area, fsize);
/* update windows */
- h2s->mws -= fsize;
+ h2s->sws -= fsize;
h2c->mws -= fsize;
/* and exchange with our old area */
@@ -5024,7 +5028,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
if (!fsize)
goto send_empty;
- if (h2s->mws <= 0) {
+ if (h2s_mws(h2s) <= 0) {
h2s->flags |= H2_SF_BLK_SFCTL;
if (LIST_ADDED(&h2s->list))
LIST_DEL_INIT(&h2s->list);
@@ -5034,8 +5038,8 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
if (fsize > count)
fsize = count;
- if (fsize > h2s->mws)
- fsize = h2s->mws; // >0
+ if (fsize > h2s_mws(h2s))
+ fsize = h2s_mws(h2s); // >0
if (h2c->mfs && fsize > h2c->mfs)
fsize = h2c->mfs; // >0
@@ -5071,7 +5075,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
/* now let's copy this this into the output buffer */
memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
- h2s->mws -= fsize;
+ h2s->sws -= fsize;
h2c->mws -= fsize;
count -= fsize;