From: Eric Dumazet
Date: Fri, 6 May 2016 08:55:12 -0700
Subject: [PATCH] fq_codel: add memory limitation per queue
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

On small embedded routers, one wants to control the maximal amount of
memory used by fq_codel, instead of controlling the number of packets or
bytes, since GRO/TSO make these limits impractical.

Assuming skb->truesize is accurate, we have to keep track of the
skb->truesize sum for the skbs in the queue.

This patch adds a new TCA_FQ_CODEL_MEMORY_LIMIT attribute.

I chose a default value of 32 MBytes, which looks reasonable even for
heavy duty usages. (Prior fq_codel users should not be hurt when they
upgrade their kernels.)

Two fields are added to tc_fq_codel_qd_stats to report:
 - Current memory usage
 - Number of drops caused by memory limits

# tc qd replace dev eth1 root est 1sec 4sec fq_codel memory_limit 4M ..
# tc -s -d qd sh dev eth1
qdisc fq_codel 8008: root refcnt 257 limit 10240p flows 1024 quantum 1514 target 5.0ms interval 100.0ms memory_limit 4Mb ecn
 Sent 2083566791363 bytes 1376214889 pkt (dropped 4994406, overlimits 0 requeues 21705223)
 rate 9841Mbit 812549pps backlog 3906120b 376p requeues 21705223
  maxpacket 68130 drop_overlimit 4994406 new_flow_count 28855414 ecn_mark 0
  memory_used 4190048 drop_overmemory 4994406
  new_flows_len 1 old_flows_len 177

Signed-off-by: Eric Dumazet
Cc: Jesper Dangaard Brouer
Cc: Dave Täht
Cc: Sebastian Möller
Signed-off-by: David S. Miller
---
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -712,6 +712,7 @@ enum {
 	TCA_FQ_CODEL_QUANTUM,
 	TCA_FQ_CODEL_CE_THRESHOLD,
 	TCA_FQ_CODEL_DROP_BATCH_SIZE,
+	TCA_FQ_CODEL_MEMORY_LIMIT,
 
 	__TCA_FQ_CODEL_MAX
 };
@@ -736,6 +737,8 @@ struct tc_fq_codel_qd_stats {
 	__u32	new_flows_len;	/* count of flows in new list */
 	__u32	old_flows_len;	/* count of flows in old list */
 	__u32	ce_mark;	/* packets above ce_threshold */
+	__u32	memory_usage;	/* in bytes */
+	__u32	drop_overmemory;
 };
 
 struct tc_fq_codel_cl_stats {
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -58,8 +58,11 @@ struct fq_codel_sched_data {
 	u32		perturbation;	/* hash perturbation */
 	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
 	u32		drop_batch_size;
+	u32		memory_limit;
 	struct codel_params cparams;
 	struct codel_stats cstats;
+	u32		memory_usage;
+	u32		drop_overmemory;
 	u32		drop_overlimit;
 	u32		new_flow_count;
 
@@ -141,6 +144,7 @@ static unsigned int fq_codel_drop(struct
 	unsigned int maxbacklog = 0, idx = 0, i, len;
 	struct fq_codel_flow *flow;
 	unsigned int threshold;
+	unsigned int mem = 0;
 
 	/* Queue is full! Find the fat flow and drop packet(s) from it.
 	 * This might sound expensive, but with 1024 flows, we scan
@@ -165,11 +169,13 @@ static unsigned int fq_codel_drop(struct
 	do {
 		skb = dequeue_head(flow);
 		len += qdisc_pkt_len(skb);
+		mem += skb->truesize;
 		kfree_skb(skb);
 	} while (++i < max_packets && len < threshold);
 
 	flow->dropped += i;
 	q->backlogs[idx] -= len;
+	q->memory_usage -= mem;
 	sch->qstats.drops += i;
 	sch->qstats.backlog -= len;
 	sch->q.qlen -= i;
@@ -191,6 +197,7 @@ static int fq_codel_enqueue(struct sk_bu
 	unsigned int idx, prev_backlog, prev_qlen;
 	struct fq_codel_flow *flow;
 	int uninitialized_var(ret);
+	bool memory_limited;
 
 	idx = fq_codel_classify(skb, sch, &ret);
 	if (idx == 0) {
@@ -213,7 +220,9 @@ static int fq_codel_enqueue(struct sk_bu
 		flow->deficit = q->quantum;
 		flow->dropped = 0;
 	}
-	if (++sch->q.qlen <= sch->limit)
+	q->memory_usage += skb->truesize;
+	memory_limited = q->memory_usage > q->memory_limit;
+	if (++sch->q.qlen <= sch->limit && !memory_limited)
 		return NET_XMIT_SUCCESS;
 
 	prev_backlog = sch->qstats.backlog;
@@ -227,7 +236,8 @@ static int fq_codel_enqueue(struct sk_bu
 	ret = fq_codel_drop(sch, q->drop_batch_size);
 
 	q->drop_overlimit += prev_qlen - sch->q.qlen;
-
+	if (memory_limited)
+		q->drop_overmemory += prev_qlen - sch->q.qlen;
 	/* As we dropped packet(s), better let upper stack know this */
 	qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen,
 				  prev_backlog - sch->qstats.backlog);
@@ -296,6 +306,7 @@ begin:
 		list_del_init(&flow->flowchain);
 		goto begin;
 	}
+	q->memory_usage -= skb->truesize;
 	qdisc_bstats_update(sch, skb);
 	flow->deficit -= qdisc_pkt_len(skb);
 	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
@@ -343,6 +354,7 @@ static const struct nla_policy fq_codel_
 	[TCA_FQ_CODEL_QUANTUM]		= { .type = NLA_U32 },
 	[TCA_FQ_CODEL_CE_THRESHOLD]	= { .type = NLA_U32 },
 	[TCA_FQ_CODEL_DROP_BATCH_SIZE]	= { .type = NLA_U32 },
+	[TCA_FQ_CODEL_MEMORY_LIMIT]	= { .type = NLA_U32 },
 };
 
 static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
@@ -397,7 +409,11 @@ static int fq_codel_change(struct Qdisc
 	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
 		q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
 
-	while (sch->q.qlen > sch->limit) {
+	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
+		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
+
+	while (sch->q.qlen > sch->limit ||
+	       q->memory_usage > q->memory_limit) {
 		struct sk_buff *skb = fq_codel_dequeue(sch);
 
 		q->cstats.drop_len += qdisc_pkt_len(skb);
@@ -442,6 +458,7 @@ static int fq_codel_init(struct Qdisc *s
 
 	sch->limit = 10*1024;
 	q->flows_cnt = 1024;
+	q->memory_limit = 32 << 20; /* 32 MBytes */
 	q->drop_batch_size = 64;
 	q->quantum = psched_mtu(qdisc_dev(sch));
 	q->perturbation = prandom_u32();
@@ -502,6 +519,8 @@ static int fq_codel_dump(struct Qdisc *s
 			q->quantum) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
 			q->drop_batch_size) ||
+	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
+			q->memory_limit) ||
 	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
 			q->flows_cnt))
 		goto nla_put_failure;
@@ -530,6 +549,8 @@ static int fq_codel_dump_stats(struct Qd
 	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
 	st.qdisc_stats.new_flow_count = q->new_flow_count;
 	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
+	st.qdisc_stats.memory_usage = q->memory_usage;
+	st.qdisc_stats.drop_overmemory = q->drop_overmemory;
 
 	list_for_each(pos, &q->new_flows)
 		st.qdisc_stats.new_flows_len++;
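
[Editor's illustration, not part of the patch] A minimal user-space sketch of the accounting rule the patch adds to fq_codel_enqueue(): keep a running sum of skb->truesize and trigger the drop path once that sum exceeds memory_limit. The names toy_fq and toy_enqueue below are hypothetical; the real qdisc sheds packets from the fattest flow via fq_codel_drop() and subtracts their truesize sum, as the diff above shows.

/*
 * Standalone sketch of truesize-based memory accounting, assuming the
 * 32 MB default limit from fq_codel_init().  Compile with: cc toy.c
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_fq {
	unsigned int memory_usage;	/* running sum of per-packet truesize */
	unsigned int memory_limit;	/* cap; 32 MB by default in the patch */
	unsigned int drop_overmemory;	/* drops caused by the memory limit */
};

/* Account one packet; return false when the memory limit is exceeded. */
static bool toy_enqueue(struct toy_fq *q, unsigned int truesize)
{
	q->memory_usage += truesize;
	if (q->memory_usage > q->memory_limit) {
		/* The real code drops from the fattest flow and subtracts
		 * its truesize sum; this sketch just refuses the new packet.
		 */
		q->memory_usage -= truesize;
		q->drop_overmemory++;
		return false;
	}
	return true;
}

int main(void)
{
	struct toy_fq q = { .memory_limit = 32 << 20 };	/* 32 MBytes */
	unsigned int i, accepted = 0;

	/* 20000 GRO-sized buffers of ~2 KB truesize overflow the 32 MB cap. */
	for (i = 0; i < 20000; i++)
		if (toy_enqueue(&q, 2048))
			accepted++;

	printf("accepted=%u memory_usage=%u drop_overmemory=%u\n",
	       accepted, q.memory_usage, q.drop_overmemory);
	return 0;
}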