realtek: Add driver support for TC offloading

This adds support for offloading TC flower rules using the Packet Inspection Engine
of the RTL SoCs. Basic infrastructure is provided, with callbacks into the tc
subsystem and support for HW packet counters.

Signed-off-by: Birger Koblitz <git@birger-koblitz.de>
Author:    Birger Koblitz <git@birger-koblitz.de>, 2021-09-07 15:30:29 +02:00
Committer: John Crispin
parent     a6678accbd
commit     54805fc911
8 changed files with 730 additions and 3 deletions


@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_RTL83XX) += common.o dsa.o \
rtl838x.o rtl839x.o rtl930x.o rtl931x.o debugfs.o qos.o
rtl838x.o rtl839x.o rtl930x.o rtl931x.o debugfs.o qos.o tc.o


@@ -449,6 +449,62 @@ int rtl83xx_lag_del(struct dsa_switch *ds, int group, int port)
return 0;
}
/*
* Allocate a 64 bit octet counter located in the LOG HW table
*/
static int rtl83xx_octet_cntr_alloc(struct rtl838x_switch_priv *priv)
{
int idx;
mutex_lock(&priv->reg_mutex);
idx = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
if (idx >= priv->n_counters) {
mutex_unlock(&priv->reg_mutex);
return -1;
}
set_bit(idx, priv->octet_cntr_use_bm);
mutex_unlock(&priv->reg_mutex);
return idx;
}
/*
 * Allocate a 32-bit packet counter
 * Two 32-bit packet counters share the location of a 64-bit octet counter.
 * Initially there are no free packet counters; two become available by
 * allocating the corresponding octet counter.
 */
int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv)
{
int idx, j;
mutex_lock(&priv->reg_mutex);
/* Because initially no packet counters are free, the bitmap logic is reversed:
 * a set bit marks a free packet counter, a cleared bit one that is unavailable
 * (in use, or its octet-counter slot has not been split yet)
 */
idx = find_first_bit(priv->packet_cntr_use_bm, MAX_COUNTERS * 2);
if (idx >= priv->n_counters * 2) {
j = find_first_zero_bit(priv->octet_cntr_use_bm, MAX_COUNTERS);
if (j >= priv->n_counters) {
mutex_unlock(&priv->reg_mutex);
return -1;
}
set_bit(j, priv->octet_cntr_use_bm);
idx = j * 2;
set_bit(j * 2 + 1, priv->packet_cntr_use_bm);
} else {
clear_bit(idx, priv->packet_cntr_use_bm);
}
mutex_unlock(&priv->reg_mutex);
return idx;
}
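/* Illustration only (not part of the driver): starting from empty counter
 * bitmaps, successive calls behave like this:
 *
 *   rtl83xx_packet_cntr_alloc(priv); // no free packet counter yet: claims
 *                                    // octet counter 0, returns 0, marks
 *                                    // packet counter 1 as free
 *   rtl83xx_packet_cntr_alloc(priv); // returns the free half, 1
 *   rtl83xx_packet_cntr_alloc(priv); // claims octet counter 1, returns 2,
 *                                    // marks 3 as free
 */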
static int rtl83xx_handle_changeupper(struct rtl838x_switch_priv *priv,
struct net_device *ndev,
struct netdev_notifier_changeupper_info *info)
@@ -499,6 +555,30 @@ out:
return 0;
}
/*
 * Is the lower network device a DSA slave network device of our RTL switch?
 * Unfortunately we cannot just follow dev->dsa_ptr as this is only set for the
 * DSA master device.
 */
int rtl83xx_port_is_under(const struct net_device *dev, struct rtl838x_switch_priv *priv)
{
int i;
// TODO: On 5.12:
// if(!dsa_slave_dev_check(dev)) {
// netdev_info(dev, "%s: not a DSA device.\n", __func__);
// return -EINVAL;
// }
for (i = 0; i < priv->cpu_port; i++) {
if (!priv->ports[i].dp)
continue;
if (priv->ports[i].dp->slave == dev)
return i;
}
return -EINVAL;
}
static int rtl83xx_netdevice_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -568,6 +648,9 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
rtl8380_get_version(priv);
priv->n_lags = 8;
priv->l2_bucket_size = 4;
priv->n_pie_blocks = 12;
priv->port_ignore = 0x1f;
priv->n_counters = 128;
break;
case RTL8390_FAMILY_ID:
priv->ds->ops = &rtl83xx_switch_ops;
@@ -581,6 +664,9 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
rtl8390_get_version(priv);
priv->n_lags = 16;
priv->l2_bucket_size = 4;
priv->n_pie_blocks = 18;
priv->port_ignore = 0x3f;
priv->n_counters = 1024;
break;
case RTL9300_FAMILY_ID:
priv->ds->ops = &rtl930x_switch_ops;
@@ -595,6 +681,9 @@ static int __init rtl83xx_sw_probe(struct platform_device *pdev)
priv->n_lags = 16;
sw_w32(1, RTL930X_ST_CTRL);
priv->l2_bucket_size = 8;
priv->n_pie_blocks = 16;
priv->port_ignore = 0x3f;
priv->n_counters = 2048;
break;
case RTL9310_FAMILY_ID:
priv->ds->ops = &rtl930x_switch_ops;


@@ -188,6 +188,8 @@ static int rtl83xx_setup(struct dsa_switch *ds)
rtl83xx_enable_phy_polling(priv);
pr_debug("Please wait until PHY is settled\n");
msleep(1000);
priv->r->pie_init(priv);
return 0;
}
@@ -228,6 +230,8 @@ static int rtl930x_setup(struct dsa_switch *ds)
rtl83xx_enable_phy_polling(priv);
priv->r->pie_init(priv);
return 0;
}


@@ -338,12 +338,45 @@
/* Debug features */
#define RTL930X_STAT_PRVTE_DROP_COUNTER0 (0xB5B8)
/* Packet Inspection Engine */
#define RTL838X_METER_GLB_CTRL (0x4B08)
#define RTL839X_METER_GLB_CTRL (0x1300)
#define RTL930X_METER_GLB_CTRL (0xa0a0)
#define RTL839X_ACL_CTRL (0x1288)
#define RTL838X_ACL_BLK_LOOKUP_CTRL (0x6100)
#define RTL839X_ACL_BLK_LOOKUP_CTRL (0x1280)
#define RTL930X_PIE_BLK_LOOKUP_CTRL (0xa5a0)
#define RTL838X_ACL_BLK_PWR_CTRL (0x6104)
#define RTL839X_PS_ACL_PWR_CTRL (0x049c)
#define RTL838X_ACL_BLK_TMPLTE_CTRL(block) (0x6108 + ((block) << 2))
#define RTL839X_ACL_BLK_TMPLTE_CTRL(block) (0x128c + ((block) << 2))
#define RTL930X_PIE_BLK_TMPLTE_CTRL(block) (0xa624 + ((block) << 2))
#define RTL838X_ACL_BLK_GROUP_CTRL (0x615C)
#define RTL839X_ACL_BLK_GROUP_CTRL (0x12ec)
#define RTL838X_ACL_CLR_CTRL (0x6168)
#define RTL839X_ACL_CLR_CTRL (0x12fc)
#define RTL930X_PIE_CLR_CTRL (0xa66c)
#define RTL838X_DMY_REG27 (0x3378)
#define RTL838X_ACL_PORT_LOOKUP_CTRL(p) (0x616C + (((p) << 2)))
#define RTL930X_ACL_PORT_LOOKUP_CTRL(p) (0xA784 + (((p) << 2)))
#define RTL930X_PIE_BLK_PHASE_CTRL (0xA5A4)
// PIE actions
#define PIE_ACT_COPY_TO_PORT 2
#define PIE_ACT_REDIRECT_TO_PORT 4
#define PIE_ACT_ROUTE_UC 6
#define PIE_ACT_VID_ASSIGN 0
#define MAX_VLANS 4096
#define MAX_LAGS 16
#define MAX_PRIOS 8
#define RTL930X_PORT_IGNORE 0x3f
#define MAX_MC_GROUPS 512
#define UNKNOWN_MC_PMASK (MAX_MC_GROUPS - 1)
#define PIE_BLOCK_SIZE 128
#define MAX_PIE_ENTRIES (18 * PIE_BLOCK_SIZE)
#define N_FIXED_FIELDS 12
#define MAX_COUNTERS 2048
enum phy_type {
PHY_NONE = 0,
@@ -409,6 +442,166 @@ struct rtl838x_l2_entry {
bool nh_vlan_target; // Only RTL83xx: VLAN used for next hop
};
enum fwd_rule_action {
FWD_RULE_ACTION_NONE = 0,
FWD_RULE_ACTION_FWD = 1,
};
enum pie_phase {
PHASE_VACL = 0,
PHASE_IACL = 1,
};
/* Intermediate representation of a Packet Inspection Engine rule,
 * as suggested by the kernel's tc flower offload subsystem.
 * The field meaning is universal across SoC families, but the data content is
 * specific to the SoC family (e.g. because of different port ranges).
 */
struct pie_rule {
int id;
enum pie_phase phase; // Phase in which this template is applied
int packet_cntr; // ID of a packet counter assigned to this rule
int octet_cntr; // ID of a byte counter assigned to this rule
u32 last_packet_cnt;
u64 last_octet_cnt;
// The following are requirements for the pie template
bool is_egress;
bool is_ipv6; // This is a rule with IPv6 fields
// Fixed fields that are always matched against on RTL8380
u8 spmmask_fix;
u8 spn; // Source port number
bool stacking_port; // Source port is stacking port
bool mgnt_vlan; // Packet arrived on management VLAN
bool dmac_hit_sw; // The packet's destination MAC matches one of the device's own MAC addresses
bool content_too_deep; // The content of the packet cannot be parsed: too many layers
bool not_first_frag; // Not the first IP fragment
u8 frame_type_l4; // 0: UDP, 1: TCP, 2: ICMP/ICMPv6, 3: IGMP
u8 frame_type; // 0: ARP, 1: L2 only, 2: IPv4, 3: IPv6
bool otag_fmt; // 0: outer tag packet, 1: outer priority tag or untagged
bool itag_fmt; // 0: inner tag packet, 1: inner priority tag or untagged
bool otag_exist; // packet with outer tag
bool itag_exist; // packet with inner tag
bool frame_type_l2; // 0: Ethernet, 1: LLC_SNAP, 2: LLC_Other, 3: Reserved
bool igr_normal_port; // Ingress port is not the CPU or a stacking port
u8 tid; // The template ID defining what the templated fields mean
// Masks for the fields that are always matched against on RTL8380
u8 spmmask_fix_m;
u8 spn_m;
bool stacking_port_m;
bool mgnt_vlan_m;
bool dmac_hit_sw_m;
bool content_too_deep_m;
bool not_first_frag_m;
u8 frame_type_l4_m;
u8 frame_type_m;
bool otag_fmt_m;
bool itag_fmt_m;
bool otag_exist_m;
bool itag_exist_m;
bool frame_type_l2_m;
bool igr_normal_port_m;
u8 tid_m;
// Logical operations between rules, special rules for rule numbers apply
bool valid;
bool cond_not; // Match when the conditions do not match
bool cond_and1; // AND this rule 2n with the next rule 2n+1 in the same block
bool cond_and2; // AND this rule m in block 2n with rule m in block 2n+1
bool ivalid;
// Actions to be performed
bool drop; // Drop the packet
bool fwd_sel; // Forward packet: to port, portmask, dest route, next rule, drop
bool ovid_sel; // Do something to outer vlan-id: shift, re-assign
bool ivid_sel; // Do something to inner vlan-id: shift, re-assign
bool flt_sel; // Filter the packet when sending to certain ports
bool log_sel; // Log the packet in one of the LOG-table counters
bool rmk_sel; // Re-mark the packet, i.e. change the priority-tag
bool meter_sel; // Meter the packet, i.e. limit rate of this type of packet
bool tagst_sel; // Change the egress tag
bool mir_sel; // Mirror the packet to a Link Aggregation Group
bool nopri_sel; // Change the normal priority
bool cpupri_sel; // Change the CPU priority
bool otpid_sel; // Change Outer Tag Protocol Identifier (802.1q)
bool itpid_sel; // Change Inner Tag Protocol Identifier (802.1q)
bool shaper_sel; // Apply traffic shaper
bool mpls_sel; // MPLS actions
bool bypass_sel; // Bypass actions
bool fwd_sa_lrn; // Learn the source address when forwarding
bool fwd_mod_to_cpu; // Forward the modified VLAN tag format to CPU-port
// Fields used in predefined templates 0-2 on RTL8380 / 8390 / 9300
u64 spm; // Source Port Matrix
u16 otag; // Outer VLAN-ID
u8 smac[ETH_ALEN]; // Source MAC address
u8 dmac[ETH_ALEN]; // Destination MAC address
u16 ethertype; // Ethernet frame type field in ethernet header
u16 itag; // Inner VLAN-ID
u16 field_range_check;
u32 sip; // Source IP
struct in6_addr sip6; // IPv6 Source IP
u32 dip; // Destination IP
struct in6_addr dip6; // IPv6 Destination IP
u16 tos_proto; // IPv4: TOS + Protocol fields, IPv6: Traffic class + next header
u16 sport; // TCP/UDP source port
u16 dport; // TCP/UDP destination port
u16 icmp_igmp;
u16 tcp_info;
u16 dsap_ssap; // Destination / Source Service Access Point bytes (802.3)
u64 spm_m;
u16 otag_m;
u8 smac_m[ETH_ALEN];
u8 dmac_m[ETH_ALEN];
u16 ethertype_m; // Mask must be as wide as the 16-bit ethertype field above
u16 itag_m;
u16 field_range_check_m;
u32 sip_m;
struct in6_addr sip6_m; // IPv6 Source IP mask
u32 dip_m;
struct in6_addr dip6_m; // IPv6 Destination IP mask
u16 tos_proto_m;
u16 sport_m;
u16 dport_m;
u16 icmp_igmp_m;
u16 tcp_info_m;
u16 dsap_ssap_m;
// Data associated with actions
u8 fwd_act; // Type of forwarding action
// 0: permit, 1: drop, 2: copy to portid, 3: copy to portmask
// 4: redirect to portid, 5: redirect to portmask
// 6: route, 7: vlan leaky (only 8380)
u16 fwd_data; // Additional data for forwarding action, e.g. destination port
u8 ovid_act;
u16 ovid_data; // Outer VLAN ID
u8 ivid_act;
u16 ivid_data; // Inner VLAN ID
u16 flt_data; // Filtering data
u16 log_data; // ID of packet or octet counter in LOG table, on RTL93xx
// unnecessary since PIE-Rule-ID == LOG-counter-ID
bool log_octets;
u8 mpls_act; // MPLS action type
u16 mpls_lib_idx; // MPLS action data
u16 rmk_data; // Data for remarking
u16 meter_data; // ID of meter for bandwidth control
u16 tagst_data;
u16 mir_data;
u16 nopri_data;
u16 cpupri_data;
u16 otpid_data;
u16 itpid_data;
u16 shaper_data;
// Bypass actions, ignored on RTL8380
bool bypass_all; // Not clear
bool bypass_igr_stp; // Bypass Ingress STP state
bool bypass_ibc_sc; // Bypass Ingress Bandwidth Control and Storm Control
};
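/* Illustration only (not part of this commit): a flower rule matching IPv4
 * TCP traffic to destination port 23 with a drop action could be expressed
 * in this intermediate representation roughly as follows.
 */
static const struct pie_rule example_drop_telnet_rule = {
	.phase = PHASE_VACL,	/* applied in the VACL phase */
	.frame_type = 2,	/* 2: IPv4 */
	.frame_type_m = 3,
	.frame_type_l4 = 1,	/* 1: TCP */
	.frame_type_l4_m = 7,
	.dport = 23,		/* TCP destination port (telnet) */
	.dport_m = 0xffff,
	.drop = true,		/* action: drop matching packets */
};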
struct rtl838x_nexthop {
u16 id; // ID in HW Nexthop table
u32 ip; // IP address of nexthop
@@ -424,6 +617,15 @@ struct rtl838x_nexthop {
struct rtl838x_switch_priv;
struct rtl83xx_flow {
unsigned long cookie;
struct rhash_head node;
struct rcu_head rcu_head;
struct rtl838x_switch_priv *priv;
struct pie_rule rule;
u32 flags;
};
struct rtl838x_reg {
void (*mask_port_reg_be)(u64 clear, u64 set, int reg);
void (*set_port_reg_be)(u64 set, int reg);
@@ -491,6 +693,13 @@ struct rtl838x_reg {
u64 (*read_mcast_pmask)(int idx);
void (*write_mcast_pmask)(int idx, u64 portmask);
void (*vlan_fwd_on_inner)(int port, bool is_set);
void (*pie_init)(struct rtl838x_switch_priv *priv);
int (*pie_rule_read)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr);
int (*pie_rule_write)(struct rtl838x_switch_priv *priv, int idx, struct pie_rule *pr);
int (*pie_rule_add)(struct rtl838x_switch_priv *priv, struct pie_rule *rule);
void (*pie_rule_rm)(struct rtl838x_switch_priv *priv, struct pie_rule *rule);
u32 (*packet_cntr_read)(int counter);
void (*packet_cntr_clear)(int counter);
};
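/* Illustration only: each SoC family's rtl838x_reg table is expected to hook
 * up the new PIE callbacks roughly as sketched below. The rtl930x_* function
 * names are placeholders, the actual implementations are not part of this diff.
 */
static const struct rtl838x_reg rtl930x_reg_sketch = {
	/* ... existing callbacks ... */
	.pie_init = rtl930x_pie_init,
	.pie_rule_add = rtl930x_pie_rule_add,
	.pie_rule_rm = rtl930x_pie_rule_rm,
	.packet_cntr_read = rtl930x_packet_cntr_read,
	.packet_cntr_clear = rtl930x_packet_cntr_clear,
};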
struct rtl838x_switch_priv {
@@ -501,7 +710,8 @@ struct rtl838x_switch_priv {
u16 family_id;
char version;
struct rtl838x_port ports[57];
struct mutex reg_mutex;
struct mutex reg_mutex; // Mutex for individual register manipulations
struct mutex pie_mutex; // Mutex for Packet Inspection Engine
int link_state_irq;
int mirror_group_ports[4];
struct mii_bus *mii_bus;
@@ -509,6 +719,7 @@ struct rtl838x_switch_priv {
u8 cpu_port;
u8 port_mask;
u8 port_width;
u8 port_ignore;
u64 irq_mask;
u32 fib_entries;
int l2_bucket_size;
@@ -519,6 +730,12 @@ struct rtl838x_switch_priv {
struct notifier_block nb;
bool eee_enabled;
unsigned long int mc_group_bm[MAX_MC_GROUPS >> 5];
int n_pie_blocks;
struct rhashtable tc_ht;
unsigned long int pie_use_bm[MAX_PIE_ENTRIES >> 5];
int n_counters;
unsigned long int octet_cntr_use_bm[MAX_COUNTERS >> 5];
unsigned long int packet_cntr_use_bm[MAX_COUNTERS >> 4];
};
void rtl838x_dbgfs_init(struct rtl838x_switch_priv *priv);


@@ -74,6 +74,11 @@ inline u32 rtl_table_data_r(struct table_reg *r, int i);
inline void rtl_table_data_w(struct table_reg *r, u32 v, int i);
void __init rtl83xx_setup_qos(struct rtl838x_switch_priv *priv);
int rtl83xx_packet_cntr_alloc(struct rtl838x_switch_priv *priv);
int rtl83xx_port_is_under(const struct net_device *dev, struct rtl838x_switch_priv *priv);
int read_phy(u32 port, u32 page, u32 reg, u32 *val);
int write_phy(u32 port, u32 page, u32 reg, u32 val);


@@ -0,0 +1,409 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <net/dsa.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <linux/rhashtable.h>
#include <asm/mach-rtl838x/mach-rtl83xx.h>
#include "rtl83xx.h"
#include "rtl838x.h"
/*
* Parse the flow rule for the matching conditions
*/
static int rtl83xx_parse_flow_rule(struct rtl838x_switch_priv *priv,
struct flow_rule *rule, struct rtl83xx_flow *flow)
{
struct flow_dissector *dissector = rule->match.dissector;
pr_debug("In %s\n", __func__);
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
(dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
pr_err("Cannot form TC key: used_keys = 0x%x\n", dissector->used_keys);
return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
pr_debug("%s: BASIC\n", __func__);
flow_rule_match_basic(rule, &match);
if (match.key->n_proto == htons(ETH_P_ARP))
flow->rule.frame_type = 0;
if (match.key->n_proto == htons(ETH_P_IP))
flow->rule.frame_type = 2;
if (match.key->n_proto == htons(ETH_P_IPV6))
flow->rule.frame_type = 3;
if ((match.key->n_proto == htons(ETH_P_ARP)) || flow->rule.frame_type)
flow->rule.frame_type_m = 3;
if (flow->rule.frame_type >= 2) {
if (match.key->ip_proto == IPPROTO_UDP)
flow->rule.frame_type_l4 = 0;
if (match.key->ip_proto == IPPROTO_TCP)
flow->rule.frame_type_l4 = 1;
if (match.key->ip_proto == IPPROTO_ICMP
|| match.key->ip_proto == IPPROTO_ICMPV6)
flow->rule.frame_type_l4 = 2;
if (match.key->ip_proto == IPPROTO_IGMP)
flow->rule.frame_type_l4 = 3;
if ((match.key->ip_proto == IPPROTO_UDP) || flow->rule.frame_type_l4)
flow->rule.frame_type_l4_m = 7;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
pr_debug("%s: ETH_ADDR\n", __func__);
flow_rule_match_eth_addrs(rule, &match);
ether_addr_copy(flow->rule.dmac, match.key->dst);
ether_addr_copy(flow->rule.dmac_m, match.mask->dst);
ether_addr_copy(flow->rule.smac, match.key->src);
ether_addr_copy(flow->rule.smac_m, match.mask->src);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
pr_debug("%s: VLAN\n", __func__);
flow_rule_match_vlan(rule, &match);
flow->rule.itag = match.key->vlan_id;
flow->rule.itag_m = match.mask->vlan_id;
// TODO: What about match.key->vlan_priority ?
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
pr_debug("%s: IPV4\n", __func__);
flow_rule_match_ipv4_addrs(rule, &match);
flow->rule.is_ipv6 = false;
flow->rule.dip = match.key->dst;
flow->rule.dip_m = match.mask->dst;
flow->rule.sip = match.key->src;
flow->rule.sip_m = match.mask->src;
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
struct flow_match_ipv6_addrs match;
pr_debug("%s: IPV6\n", __func__);
flow->rule.is_ipv6 = true;
flow_rule_match_ipv6_addrs(rule, &match);
flow->rule.dip6 = match.key->dst;
flow->rule.dip6_m = match.mask->dst;
flow->rule.sip6 = match.key->src;
flow->rule.sip6_m = match.mask->src;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
pr_debug("%s: PORTS\n", __func__);
flow_rule_match_ports(rule, &match);
flow->rule.dport = match.key->dst;
flow->rule.dport_m = match.mask->dst;
flow->rule.sport = match.key->src;
flow->rule.sport_m = match.mask->src;
}
// TODO: ICMP
return 0;
}
static void rtl83xx_flow_bypass_all(struct rtl83xx_flow *flow)
{
flow->rule.bypass_sel = true;
flow->rule.bypass_all = true;
flow->rule.bypass_igr_stp = true;
flow->rule.bypass_ibc_sc = true;
}
static int rtl83xx_parse_fwd(struct rtl838x_switch_priv *priv,
const struct flow_action_entry *act, struct rtl83xx_flow *flow)
{
struct net_device *dev = act->dev;
int port;
port = rtl83xx_port_is_under(dev, priv);
if (port < 0) {
netdev_info(dev, "%s: not a DSA device.\n", __func__);
return -EINVAL;
}
flow->rule.fwd_sel = true;
flow->rule.fwd_data = port;
pr_debug("Using port index: %d\n", port);
rtl83xx_flow_bypass_all(flow);
return 0;
}
static int rtl83xx_add_flow(struct rtl838x_switch_priv *priv, struct flow_cls_offload *f,
struct rtl83xx_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
const struct flow_action_entry *act;
int i, err;
pr_debug("%s\n", __func__);
err = rtl83xx_parse_flow_rule(priv, rule, flow);
if (err)
return err;
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_DROP:
pr_debug("%s: DROP\n", __func__);
flow->rule.drop = true;
rtl83xx_flow_bypass_all(flow);
return 0;
case FLOW_ACTION_TRAP:
pr_debug("%s: TRAP\n", __func__);
flow->rule.fwd_data = priv->cpu_port;
flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
rtl83xx_flow_bypass_all(flow);
break;
case FLOW_ACTION_MANGLE:
pr_err("%s: FLOW_ACTION_MANGLE not supported\n", __func__);
return -EOPNOTSUPP;
case FLOW_ACTION_ADD:
pr_err("%s: FLOW_ACTION_ADD not supported\n", __func__);
return -EOPNOTSUPP;
case FLOW_ACTION_VLAN_PUSH:
pr_debug("%s: VLAN_PUSH\n", __func__);
// TODO: act->vlan.proto
flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
flow->rule.ivid_sel = true;
flow->rule.ivid_data = htons(act->vlan.vid);
flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
flow->rule.ovid_sel = true;
flow->rule.ovid_data = htons(act->vlan.vid);
flow->rule.fwd_mod_to_cpu = true;
break;
case FLOW_ACTION_VLAN_POP:
pr_debug("%s: VLAN_POP\n", __func__);
flow->rule.ivid_act = PIE_ACT_VID_ASSIGN;
flow->rule.ivid_data = 0;
flow->rule.ivid_sel = true;
flow->rule.ovid_act = PIE_ACT_VID_ASSIGN;
flow->rule.ovid_data = 0;
flow->rule.ovid_sel = true;
flow->rule.fwd_mod_to_cpu = true;
break;
case FLOW_ACTION_CSUM:
pr_err("%s: FLOW_ACTION_CSUM not supported\n", __func__);
return -EOPNOTSUPP;
case FLOW_ACTION_REDIRECT:
pr_debug("%s: REDIRECT\n", __func__);
err = rtl83xx_parse_fwd(priv, act, flow);
if (err)
return err;
flow->rule.fwd_act = PIE_ACT_REDIRECT_TO_PORT;
break;
case FLOW_ACTION_MIRRED:
pr_debug("%s: MIRRED\n", __func__);
err = rtl83xx_parse_fwd(priv, act, flow);
if (err)
return err;
flow->rule.fwd_act = PIE_ACT_COPY_TO_PORT;
break;
default:
pr_err("%s: Flow action not supported: %d\n", __func__, act->id);
return -EOPNOTSUPP;
}
}
return 0;
}
static const struct rhashtable_params tc_ht_params = {
.head_offset = offsetof(struct rtl83xx_flow, node),
.key_offset = offsetof(struct rtl83xx_flow, cookie),
.key_len = sizeof(((struct rtl83xx_flow *)0)->cookie),
.automatic_shrinking = true,
};
static int rtl83xx_configure_flower(struct rtl838x_switch_priv *priv,
struct flow_cls_offload *f)
{
struct rtl83xx_flow *flow;
int err = 0;
pr_debug("In %s\n", __func__);
rcu_read_lock();
pr_debug("Cookie %08lx\n", f->cookie);
flow = rhashtable_lookup(&priv->tc_ht, &f->cookie, tc_ht_params);
if (flow) {
pr_info("%s: Got flow\n", __func__);
err = -EEXIST;
goto rcu_unlock;
}
rcu_unlock:
rcu_read_unlock();
if (flow)
goto out;
pr_debug("%s: New flow\n", __func__);
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow) {
err = -ENOMEM;
goto out;
}
flow->cookie = f->cookie;
flow->priv = priv;
err = rhashtable_insert_fast(&priv->tc_ht, &flow->node, tc_ht_params);
if (err) {
pr_err("Could not insert add new rule\n");
goto out_free;
}
rtl83xx_add_flow(priv, f, flow); // TODO: check error
// Add log action to flow
flow->rule.packet_cntr = rtl83xx_packet_cntr_alloc(priv);
if (flow->rule.packet_cntr >= 0) {
pr_debug("Using packet counter %d\n", flow->rule.packet_cntr);
flow->rule.log_sel = true;
flow->rule.log_data = flow->rule.packet_cntr;
}
err = priv->r->pie_rule_add(priv, &flow->rule);
if (err) {
rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params);
goto out_free;
}
return 0;
out_free:
kfree(flow);
out:
pr_err("%s: error %d\n", __func__, err);
return err;
}
static int rtl83xx_delete_flower(struct rtl838x_switch_priv *priv,
struct flow_cls_offload *cls_flower)
{
struct rtl83xx_flow *flow;
pr_debug("In %s\n", __func__);
rcu_read_lock();
flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
if (!flow) {
rcu_read_unlock();
return -EINVAL;
}
priv->r->pie_rule_rm(priv, &flow->rule);
rhashtable_remove_fast(&priv->tc_ht, &flow->node, tc_ht_params);
kfree_rcu(flow, rcu_head);
rcu_read_unlock();
return 0;
}
static int rtl83xx_stats_flower(struct rtl838x_switch_priv *priv,
struct flow_cls_offload *cls_flower)
{
struct rtl83xx_flow *flow;
unsigned long lastused = 0;
int total_packets, new_packets = 0;
pr_debug("%s:\n", __func__);
flow = rhashtable_lookup_fast(&priv->tc_ht, &cls_flower->cookie, tc_ht_params);
if (!flow)
return -1;
if (flow->rule.packet_cntr >= 0) {
total_packets = priv->r->packet_cntr_read(flow->rule.packet_cntr);
pr_debug("Total packets: %d\n", total_packets);
new_packets = total_packets - flow->rule.last_packet_cnt;
flow->rule.last_packet_cnt = total_packets;
}
// TODO: We need a second PIE rule to count the bytes; until then estimate 100 bytes per packet
flow_stats_update(&cls_flower->stats, 100 * new_packets, new_packets, 0, lastused,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
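/* Sketch of how the byte-counting TODO above might be closed: also give the
 * rule an octet counter in the LOG table and read it for the stats. The
 * octet_cntr_read() callback used below is hypothetical and not part of
 * this commit.
 */
static u64 rtl83xx_flow_octets(struct rtl838x_switch_priv *priv,
			       struct rtl83xx_flow *flow)
{
	u64 total_octets, new_octets;

	if (flow->rule.octet_cntr < 0)
		return 0;

	total_octets = priv->r->octet_cntr_read(flow->rule.octet_cntr);
	new_octets = total_octets - flow->rule.last_octet_cnt;
	flow->rule.last_octet_cnt = total_octets;

	return new_octets;
}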
static int rtl83xx_setup_tc_cls_flower(struct rtl838x_switch_priv *priv,
struct flow_cls_offload *cls_flower)
{
pr_debug("%s: %d\n", __func__, cls_flower->command);
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
return rtl83xx_configure_flower(priv, cls_flower);
case FLOW_CLS_DESTROY:
return rtl83xx_delete_flower(priv, cls_flower);
case FLOW_CLS_STATS:
return rtl83xx_stats_flower(priv, cls_flower);
default:
return -EOPNOTSUPP;
}
}
static int rtl83xx_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv)
{
struct rtl838x_switch_priv *priv = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
pr_debug("%s: TC_SETUP_CLSFLOWER\n", __func__);
return rtl83xx_setup_tc_cls_flower(priv, type_data);
default:
return -EOPNOTSUPP;
}
}
static LIST_HEAD(rtl83xx_block_cb_list);
int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
{
struct rtl838x_switch_priv *priv;
struct flow_block_offload *f = type_data;
static bool first_time = true;
int err;
pr_debug("%s: %d\n", __func__, type);
if (!netdev_uses_dsa(dev)) {
pr_err("%s: no DSA\n", __func__);
return 0;
}
priv = dev->dsa_ptr->ds->priv;
switch (type) {
case TC_SETUP_BLOCK:
if (first_time) {
first_time = false;
err = rhashtable_init(&priv->tc_ht, &tc_ht_params);
if (err)
pr_err("%s: Could not initialize hash table\n", __func__);
}
f->unlocked_driver_cb = true;
return flow_block_cb_setup_simple(type_data,
&rtl83xx_block_cb_list,
rtl83xx_setup_tc_block_cb,
priv, priv, true);
default:
return -EOPNOTSUPP;
}
return 0;
}


@@ -1927,6 +1927,7 @@ static const struct net_device_ops rtl838x_eth_netdev_ops = {
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
.ndo_set_features = rtl83xx_set_features,
.ndo_fix_features = rtl838x_fix_features,
.ndo_setup_tc = rtl83xx_setup_tc,
};
static const struct net_device_ops rtl839x_eth_netdev_ops = {
@@ -1940,6 +1941,7 @@ static const struct net_device_ops rtl839x_eth_netdev_ops = {
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
.ndo_set_features = rtl83xx_set_features,
.ndo_fix_features = rtl838x_fix_features,
.ndo_setup_tc = rtl83xx_setup_tc,
};
static const struct net_device_ops rtl930x_eth_netdev_ops = {
@@ -1953,6 +1955,7 @@ static const struct net_device_ops rtl930x_eth_netdev_ops = {
.ndo_tx_timeout = rtl838x_eth_tx_timeout,
.ndo_set_features = rtl93xx_set_features,
.ndo_fix_features = rtl838x_fix_features,
.ndo_setup_tc = rtl83xx_setup_tc,
};
static const struct net_device_ops rtl931x_eth_netdev_ops = {


@@ -424,6 +424,6 @@ int rtl930x_write_phy(u32 port, u32 page, u32 reg, u32 val);
int rtl930x_read_phy(u32 port, u32 page, u32 reg, u32 *val);
int rtl931x_write_phy(u32 port, u32 page, u32 reg, u32 val);
int rtl931x_read_phy(u32 port, u32 page, u32 reg, u32 *val);
void rtl9300_sds_power(int sds_num, int val);
int rtl83xx_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data);
#endif /* _RTL838X_ETH_H */