From a9b204d1564702b704ad6fe74f10a102c7b87ba3 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Sat, 3 Dec 2016 11:14:53 -0800
Subject: [PATCH 04/10] tcp: tsq: avoid one atomic in tcp_wfree()

Under high load, tcp_wfree() has an atomic operation trying
to schedule a tasklet over and over.

We can schedule it only if our per cpu list was empty.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/ipv4/tcp_output.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -888,6 +888,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
+		bool empty;
 
 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 			goto out;
@@ -900,8 +901,10 @@ void tcp_wfree(struct sk_buff *skb)
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
+		empty = list_empty(&tsq->head);
 		list_add(&tp->tsq_node, &tsq->head);
-		tasklet_schedule(&tsq->tasklet);
+		if (empty)
+			tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
 		return;
 	}
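
The idea the patch relies on: tasklet_schedule() performs an atomic test-and-set even when the tasklet is already pending, so it is enough to call it only when the per-cpu list transitions from empty to non-empty; every later producer just links itself onto the list. Below is a minimal standalone userspace sketch of that pattern (not kernel code, and not the patched function itself); the names pending_list, queue_socket and schedule_worker are hypothetical stand-ins for tsq->head, tcp_wfree()'s queueing path and tasklet_schedule().

```c
#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

struct node {
	struct node *next;
	int id;
};

/* Stand-in for the per-cpu tsq->head list. */
static struct node *pending_list;

/* Counts how often the worker is "scheduled"; in the kernel each such
 * call would cost an atomic test_and_set_bit(). */
static int schedule_calls;

/* Stand-in for tasklet_schedule(). */
static void schedule_worker(void)
{
	schedule_calls++;
}

/* Mirrors the patched enqueue path: remember whether the list was empty
 * before adding, and only schedule the worker on the empty -> non-empty
 * transition. */
static void queue_socket(struct node *n)
{
	bool empty = (pending_list == NULL);

	n->next = pending_list;
	pending_list = n;
	if (empty)
		schedule_worker();
}

int main(void)
{
	struct node nodes[4] = { {0}, {0}, {0}, {0} };

	for (int i = 0; i < 4; i++) {
		nodes[i].id = i;
		queue_socket(&nodes[i]);
	}

	/* Only the first enqueue scheduled the worker; the other three
	 * skipped the (would-be atomic) schedule call entirely. */
	printf("schedule calls: %d (queued 4 entries)\n", schedule_calls);
	return 0;
}
```

The sketch omits the interrupt masking (local_irq_save/restore) that the real code needs because the per-cpu list is also touched from the tasklet; the point here is only the empty-check that makes the schedule call conditional.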