kernel: backport a few upstream flow offloading fixes

Signed-off-by: Felix Fietkau <nbd@nbd.name>
commit 64ed3d8056 (parent 0fac6f5562)
Author: Felix Fietkau <nbd@nbd.name>
Date: 2021-07-07 17:36:21 +02:00

8 changed files with 570 additions and 6 deletions

@@ -0,0 +1,63 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun, 18 Apr 2021 23:11:44 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: missing mutex
Patch 2ed37183abb7 ("netfilter: flowtable: separate replace, destroy and
stats to different workqueues") splits the workqueue per event type. Add
a mutex to serialize updates.
Fixes: 502e84e2382d ("net: ethernet: mtk_eth_soc: add flow offloading support")
Reported-by: Frank Wunderlich <frank-w@public-files.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
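The fix converts the callback's early returns into a single-exit section guarded by one mutex, so every command path releases the lock before returning. A minimal userspace sketch of the same pattern (plain C with pthreads; the handler names are stand-ins for the mtk_flow_offload_* functions, not the driver's API):

    #include <pthread.h>

    static pthread_mutex_t cb_mutex = PTHREAD_MUTEX_INITIALIZER;

    enum cmd { CMD_REPLACE, CMD_DESTROY, CMD_STATS, CMD_OTHER };

    /* stand-ins for mtk_flow_offload_replace/destroy/stats() */
    static int handle_replace(void) { return 0; }
    static int handle_destroy(void) { return 0; }
    static int handle_stats(void)   { return 0; }

    static int setup_cb(enum cmd command)
    {
        int err;

        /* take the lock once; every switch arm falls through to the unlock */
        pthread_mutex_lock(&cb_mutex);
        switch (command) {
        case CMD_REPLACE: err = handle_replace(); break;
        case CMD_DESTROY: err = handle_destroy(); break;
        case CMD_STATS:   err = handle_stats();   break;
        default:          err = -1;               break;
        }
        pthread_mutex_unlock(&cb_mutex);

        return err;
    }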
---
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -392,6 +392,8 @@ mtk_flow_offload_stats(struct mtk_eth *e
return 0;
}
+static DEFINE_MUTEX(mtk_flow_offload_mutex);
+
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
@@ -399,6 +401,7 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
struct net_device *dev = cb_priv;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int err;
if (!tc_can_offload(dev))
return -EOPNOTSUPP;
@@ -406,18 +409,24 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
+ mutex_lock(&mtk_flow_offload_mutex);
switch (cls->command) {
case FLOW_CLS_REPLACE:
- return mtk_flow_offload_replace(eth, cls);
+ err = mtk_flow_offload_replace(eth, cls);
+ break;
case FLOW_CLS_DESTROY:
- return mtk_flow_offload_destroy(eth, cls);
+ err = mtk_flow_offload_destroy(eth, cls);
+ break;
case FLOW_CLS_STATS:
- return mtk_flow_offload_stats(eth, cls);
+ err = mtk_flow_offload_stats(eth, cls);
+ break;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+ mutex_unlock(&mtk_flow_offload_mutex);
- return 0;
+ return err;
}
static int

@@ -0,0 +1,22 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun, 18 Apr 2021 23:11:45 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: handle VLAN pop action
Do not hit EOPNOTSUPP when flowtable offload provides a VLAN pop action.
Fixes: efce49dfe6a8 ("netfilter: flowtable: add vlan pop action offload support")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -233,6 +233,8 @@ mtk_flow_offload_replace(struct mtk_eth
data.vlan.proto = act->vlan.proto;
data.vlan.num++;
break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
case FLOW_ACTION_PPPOE_PUSH:
if (data.pppoe.num == 1)
return -EOPNOTSUPP;

@@ -0,0 +1,159 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun, 28 Mar 2021 23:08:55 +0200
Subject: [PATCH] netfilter: flowtable: dst_check() from garbage collector path
Move dst_check() to the garbage collector path. Stale routes trigger the
flow entry teardown state which makes affected flows go back to the
classic forwarding path to re-evaluate flow offloading.
IPv6 requires the dst cookie to work; store it in the flow_tuple,
otherwise dst_check() always fails.
Fixes: e5075c0badaa ("netfilter: flowtable: call dst_check() to fall back to classic forwarding")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
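The dst cookie works like a generation count: it is sampled when the route is cached and compared by dst_check() later, so a stale route is detected without redoing a lookup on every packet. A rough standalone illustration of that idea (generic C with invented names, not the kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    struct cached_route {
        uint32_t cookie;              /* generation sampled when cached */
        /* ... cached next hop, device, etc. ... */
    };

    static uint32_t fib_generation;   /* bumped on every routing change */

    static void cache_route(struct cached_route *rc)
    {
        rc->cookie = fib_generation;  /* like rt6_get_cookie() at fill time */
    }

    /* run from a periodic GC pass instead of the per-packet fast path */
    static bool route_is_stale(const struct cached_route *rc)
    {
        return rc->cookie != fib_generation;  /* like !dst_check(dst, cookie) */
    }

With a cookie that was never captured (the old code passed 0), every IPv6 check compares against a live generation and reports the route as stale, which is why the patch stores the cookie in the flow tuple.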
---
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -129,7 +129,10 @@ struct flow_offload_tuple {
in_vlan_ingress:2;
u16 mtu;
union {
- struct dst_entry *dst_cache;
+ struct {
+ struct dst_entry *dst_cache;
+ u32 dst_cookie;
+ };
struct {
u32 ifidx;
u32 hw_ifidx;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -74,6 +74,18 @@ err_ct_refcnt:
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
+static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
+{
+ const struct rt6_info *rt;
+
+ if (flow_tuple->l3proto == NFPROTO_IPV6) {
+ rt = (const struct rt6_info *)flow_tuple->dst_cache;
+ return rt6_get_cookie(rt);
+ }
+
+ return 0;
+}
+
static int flow_offload_fill_route(struct flow_offload *flow,
const struct nf_flow_route *route,
enum flow_offload_tuple_dir dir)
@@ -116,6 +128,7 @@ static int flow_offload_fill_route(struc
return -1;
flow_tuple->dst_cache = dst;
+ flow_tuple->dst_cookie = flow_offload_dst_cookie(flow_tuple);
break;
}
flow_tuple->xmit_type = route->tuple[dir].xmit_type;
@@ -389,11 +402,33 @@ nf_flow_table_iterate(struct nf_flowtabl
return err;
}
+static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
+{
+ struct dst_entry *dst;
+
+ if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
+ tuple->xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
+ dst = tuple->dst_cache;
+ if (!dst_check(dst, tuple->dst_cookie))
+ return true;
+ }
+
+ return false;
+}
+
+static bool nf_flow_has_stale_dst(struct flow_offload *flow)
+{
+ return flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple) ||
+ flow_offload_stale_dst(&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple);
+}
+
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
- if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
+ if (nf_flow_has_expired(flow) ||
+ nf_ct_is_dying(flow->ct) ||
+ nf_flow_has_stale_dst(flow))
set_bit(NF_FLOW_TEARDOWN, &flow->flags);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -364,15 +364,6 @@ nf_flow_offload_ip_hook(void *priv, stru
if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
return NF_ACCEPT;
- if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
- tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
- rt = (struct rtable *)tuplehash->tuple.dst_cache;
- if (!dst_check(&rt->dst, 0)) {
- flow_offload_teardown(flow);
- return NF_ACCEPT;
- }
- }
-
if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP;
@@ -391,6 +382,7 @@ nf_flow_offload_ip_hook(void *priv, stru
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ rt = (struct rtable *)tuplehash->tuple.dst_cache;
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
IPCB(skb)->flags = IPSKB_FORWARDED;
@@ -399,6 +391,7 @@ nf_flow_offload_ip_hook(void *priv, stru
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
+ rt = (struct rtable *)tuplehash->tuple.dst_cache;
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -607,15 +600,6 @@ nf_flow_offload_ipv6_hook(void *priv, st
if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
return NF_ACCEPT;
- if (tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_NEIGH ||
- tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM) {
- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
- if (!dst_check(&rt->dst, 0)) {
- flow_offload_teardown(flow);
- return NF_ACCEPT;
- }
- }
-
if (skb_try_make_writable(skb, thoff + hdrsize))
return NF_DROP;
@@ -633,6 +617,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
+ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
IP6CB(skb)->flags = IP6SKB_FORWARDED;
@@ -641,6 +626,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
+ rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);

@@ -0,0 +1,94 @@
From: Oz Shlomo <ozsh@nvidia.com>
Date: Thu, 3 Jun 2021 15:12:33 +0300
Subject: [PATCH] netfilter: conntrack: Introduce tcp offload timeout configuration
TCP connections may be offloaded from nf conntrack to nf flow table.
Offloaded connections are aged after 30 seconds of inactivity.
Once aged, ownership is returned to conntrack with a hard coded pickup
time of 120 seconds, after which the connection may be deleted.
The current aging intervals may be too aggressive for some users.
Provide users with the ability to control the nf flow table offload
aging and pickup time intervals via sysctl parameters as a pre-step for
configuring the nf flow table GC timeout intervals.
Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
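Once applied, the two knobs show up under /proc/sys/net/netfilter/ and take values in seconds (proc_dointvec_jiffies handles the jiffies conversion). A small sketch of tuning them from userspace, equivalent to sysctl -w; the chosen values are only examples:

    #include <stdio.h>

    static int write_sysctl(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        /* age offloaded TCP flows after 60 s idle instead of 30 s */
        write_sysctl("/proc/sys/net/netfilter/nf_flowtable_tcp_timeout", "60");
        /* give conntrack 240 s instead of 120 s to pick the flow back up */
        write_sysctl("/proc/sys/net/netfilter/nf_flowtable_tcp_pickup", "240");
        return 0;
    }

Note that this patch only makes the defaults tunable; the flowtable does not consult the values until the last patch in this series wires them into the GC logic.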
---
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -27,6 +27,10 @@ struct nf_tcp_net {
int tcp_loose;
int tcp_be_liberal;
int tcp_max_retrans;
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+ unsigned int offload_pickup;
+#endif
};
enum udp_conntrack {
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1438,6 +1438,11 @@ void nf_conntrack_tcp_init_net(struct ne
tn->tcp_loose = nf_ct_tcp_loose;
tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
+
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ tn->offload_timeout = 30 * HZ;
+ tn->offload_pickup = 120 * HZ;
+#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -567,6 +567,10 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK,
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP,
+#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
@@ -758,6 +762,20 @@ static struct ctl_table nf_ct_sysctl_tab
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD] = {
+ .procname = "nf_flowtable_tcp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP] = {
+ .procname = "nf_flowtable_tcp_pickup",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
[NF_SYSCTL_CT_PROTO_TCP_LOOSE] = {
.procname = "nf_conntrack_tcp_loose",
.maxlen = sizeof(int),
@@ -967,6 +985,12 @@ static void nf_conntrack_standalone_init
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
#undef XASSIGN
+
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD_PICKUP].data = &tn->offload_pickup;
+#endif
+
}
static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,

@@ -0,0 +1,92 @@
From: Oz Shlomo <ozsh@nvidia.com>
Date: Thu, 3 Jun 2021 15:12:34 +0300
Subject: [PATCH] netfilter: conntrack: Introduce udp offload timeout configuration
UDP connections may be offloaded from nf conntrack to nf flow table.
Offloaded connections are aged after 30 seconds of inactivity.
Once aged, ownership is returned to conntrack with a hard coded pickup
time of 30 seconds, after which the connection may be deleted.
The current aging intervals may be too aggressive for some users.
Provide users with the ability to control the nf flow table offload
aging and pickup time intervals via sysctl parameters as a pre-step for
configuring the nf flow table GC timeout intervals.
Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
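The pickup interval only matters at teardown: when an offloaded flow ages out, flow_offload_fixup_ct_timeout() (modified in the last patch of this series) hands the connection back to conntrack with at most offload_pickup seconds left. A worked sketch of that clamp in plain C, using seconds instead of jiffies and illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int now = 1000;              /* current time, seconds */
        unsigned int offload_pickup = 30;     /* UDP default from this patch */
        unsigned int ct_timeout = now + 120;  /* whatever conntrack had left */

        /* only ever shorten the timeout, never extend it */
        if ((int)(ct_timeout - now) > (int)offload_pickup)
            ct_timeout = now + offload_pickup;

        printf("conntrack expiry after teardown: t+%us\n", ct_timeout - now);
        return 0;                             /* prints t+30s */
    }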
---
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -41,6 +41,10 @@ enum udp_conntrack {
struct nf_udp_net {
unsigned int timeouts[UDP_CT_MAX];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ unsigned int offload_timeout;
+ unsigned int offload_pickup;
+#endif
};
struct nf_icmp_net {
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -270,6 +270,11 @@ void nf_conntrack_udp_init_net(struct ne
for (i = 0; i < UDP_CT_MAX; i++)
un->timeouts[i] = udp_timeouts[i];
+
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ un->offload_timeout = 30 * HZ;
+ un->offload_pickup = 30 * HZ;
+#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp =
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -576,6 +576,10 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM,
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD,
+ NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP,
+#endif
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP,
NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6,
#ifdef CONFIG_NF_CT_PROTO_SCTP
@@ -810,6 +814,20 @@ static struct ctl_table nf_ct_sysctl_tab
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = {
+ .procname = "nf_flowtable_udp_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP] = {
+ .procname = "nf_flowtable_udp_pickup",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = {
.procname = "nf_conntrack_icmp_timeout",
.maxlen = sizeof(unsigned int),
@@ -1078,6 +1096,10 @@ static int nf_conntrack_standalone_init_
table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout;
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED];
table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED];
+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout;
+ table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD_PICKUP].data = &un->offload_pickup;
+#endif
nf_conntrack_standalone_init_tcp_sysctl(net, table);
nf_conntrack_standalone_init_sctp_sysctl(net, table);

@@ -0,0 +1,134 @@
From: Oz Shlomo <ozsh@nvidia.com>
Date: Thu, 3 Jun 2021 15:12:35 +0300
Subject: [PATCH] netfilter: flowtable: Set offload timeouts according to proto values
Currently the aging period for tcp/udp connections is hard coded to
30 seconds. Aged tcp/udp connections configure a hard coded 120/30
seconds pickup timeout for conntrack.
This configuration may be too aggressive or permissive for some users.
Dynamically configure the nf flow table GC timeout intervals according
to the user defined values.
Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Reviewed-by: Paul Blakey <paulb@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
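With the timeout now variable, the stats path keeps its 90% heuristic: hardware counters are only polled once at least a tenth of the configured timeout has elapsed since the last refresh. A small demonstration of that check (plain C, seconds instead of jiffies, default values assumed):

    #include <stdio.h>

    int main(void)
    {
        int timeout = 30;   /* e.g. the tcp offload_timeout default, seconds */

        for (int elapsed = 0; elapsed <= 5; elapsed++) {
            int delta = timeout - elapsed;       /* remaining lifetime */

            /* same shape as the check in nf_flow_offload_stats() */
            if (delta >= (9 * timeout) / 10)
                printf("elapsed %ds: skip refresh\n", elapsed);
            else
                printf("elapsed %ds: refresh stats and timeout\n", elapsed);
        }
        return 0;
    }

With the 30 s default this flips from "skip" to "refresh" at four seconds elapsed; raising nf_flowtable_tcp_timeout stretches the polling interval proportionally.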
---
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -174,6 +174,8 @@ struct flow_offload {
#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp (u32)jiffies
+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
+
static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
return (__s32)(timeout - nf_flowtable_time_stamp);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -175,12 +175,10 @@ static void flow_offload_fixup_tcp(struc
tcp->seen[1].td_maxwin = 0;
}
-#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
-#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-
static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
+ struct net *net = nf_ct_net(ct);
int l4num = nf_ct_protonum(ct);
unsigned int timeout;
@@ -188,12 +186,17 @@ static void flow_offload_fixup_ct_timeou
if (!l4proto)
return;
- if (l4num == IPPROTO_TCP)
- timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
- else if (l4num == IPPROTO_UDP)
- timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
- else
+ if (l4num == IPPROTO_TCP) {
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+ timeout = tn->offload_pickup;
+ } else if (l4num == IPPROTO_UDP) {
+ struct nf_udp_net *tn = nf_udp_pernet(net);
+
+ timeout = tn->offload_pickup;
+ } else {
return;
+ }
if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
ct->timeout = nfct_time_stamp + timeout;
@@ -265,11 +268,35 @@ static const struct rhashtable_params nf
.automatic_shrinking = true,
};
+unsigned long flow_offload_get_timeout(struct flow_offload *flow)
+{
+ const struct nf_conntrack_l4proto *l4proto;
+ unsigned long timeout = NF_FLOW_TIMEOUT;
+ struct net *net = nf_ct_net(flow->ct);
+ int l4num = nf_ct_protonum(flow->ct);
+
+ l4proto = nf_ct_l4proto_find(l4num);
+ if (!l4proto)
+ return timeout;
+
+ if (l4num == IPPROTO_TCP) {
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
+
+ timeout = tn->offload_timeout;
+ } else if (l4num == IPPROTO_UDP) {
+ struct nf_udp_net *tn = nf_udp_pernet(net);
+
+ timeout = tn->offload_timeout;
+ }
+
+ return timeout;
+}
+
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
int err;
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
err = rhashtable_insert_fast(&flow_table->rhashtable,
&flow->tuplehash[0].node,
@@ -301,7 +328,7 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
void flow_offload_refresh(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
- flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
+ flow->timeout = nf_flowtable_time_stamp + flow_offload_get_timeout(flow);
if (likely(!nf_flowtable_hw_offload(flow_table)))
return;
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -885,7 +885,7 @@ static void flow_offload_work_stats(stru
lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
offload->flow->timeout = max_t(u64, offload->flow->timeout,
- lastused + NF_FLOW_TIMEOUT);
+ lastused + flow_offload_get_timeout(offload->flow));
if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
if (stats[0].pkts)
@@ -989,7 +989,7 @@ void nf_flow_offload_stats(struct nf_flo
__s32 delta;
delta = nf_flow_timeout_delta(flow->timeout);
- if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10))
+ if ((delta >= (9 * flow_offload_get_timeout(flow)) / 10))
return;
offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);

@@ -767,7 +767,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
-@@ -355,8 +354,7 @@ flow_offload_lookup(struct nf_flowtable
+@@ -395,8 +394,7 @@ flow_offload_lookup(struct nf_flowtable
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
@@ -777,13 +777,13 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
void (*iter)(struct flow_offload *flow, void *data),
void *data)
{
-@@ -388,6 +386,7 @@ nf_flow_table_iterate(struct nf_flowtabl
+@@ -428,6 +426,7 @@ nf_flow_table_iterate(struct nf_flowtabl
return err;
}
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
-static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+static bool flow_offload_stale_dst(struct flow_offload_tuple *tuple)
{
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
@@ -807,7 +807,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+#endif /* _XT_FLOWOFFLOAD_H */
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
-@@ -265,6 +265,10 @@ void nf_flow_table_free(struct nf_flowta
+@@ -270,6 +270,10 @@ void nf_flow_table_free(struct nf_flowta
void flow_offload_teardown(struct flow_offload *flow);

@@ -49,7 +49,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
static bool enable_hooks __read_mostly;
MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks");
module_param(enable_hooks, bool, 0000);
-@@ -652,6 +655,7 @@ enum nf_ct_sysctl_index {
+@@ -660,6 +663,7 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
@@ -57,7 +57,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
__NF_SYSCTL_CT_LAST_SYSCTL,
};
-@@ -978,6 +982,13 @@ static struct ctl_table nf_ct_sysctl_tab
+@@ -1014,6 +1018,13 @@ static struct ctl_table nf_ct_sysctl_tab
.proc_handler = proc_dointvec_jiffies,
},
#endif