author     Felix Fietkau <nbd@nbd.name>    2023-03-23 14:23:19 +0100
committer  Felix Fietkau <nbd@nbd.name>    2023-03-23 17:54:18 +0100
commit     aa2777145f8d392006196d8501f5520d19c05f11
tree       752b21509b5c552c471a19d5e3038807ee72fb5f /target/linux/generic
parent     d0a06965e8ab0a92a4813fddbb8e045407897310
kernel: improve mtk ppe flow accounting
Properly track L2 flows and ensure that stale data gets cleared.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target/linux/generic')
3 files changed, 655 insertions, 0 deletions
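
The heart of the series is the accounting scheme introduced by 736-04 below: hardware MIB counters are cleared whenever a flow enters the bind state, folded into the software entry when a (sub)flow expires or unbinds, and an L2 flow reports the sum over its live and expired subflows. The following standalone C sketch mirrors only that bookkeeping; struct flow_entry, flow_expire() and flow_get_stats() are illustrative stand-ins, not driver API.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One offloaded (sub)flow. hw_* stands in for the PPE MIB counters,
 * which the patches reset whenever a flow enters the bind state. */
struct flow_entry {
	bool bound;          /* still offloaded in hardware? */
	uint64_t hw_packets; /* hardware counter since the last bind */
	uint64_t hw_bytes;
	uint64_t packets;    /* software totals of expired readings */
	uint64_t bytes;
};

/* Unbind path: fold the last hardware reading into the software
 * totals, as mtk_flow_entry_update() does when a flow left bind. */
static void flow_expire(struct flow_entry *e)
{
	e->packets += e->hw_packets;
	e->bytes += e->hw_bytes;
	e->hw_packets = 0;
	e->hw_bytes = 0;
	e->bound = false;
}

/* Stats for an L2 parent: live subflows contribute their current
 * hardware reading, expired ones whatever was folded into their totals. */
static void flow_get_stats(const struct flow_entry *sub, int n,
			   uint64_t *packets, uint64_t *bytes)
{
	*packets = 0;
	*bytes = 0;
	for (int i = 0; i < n; i++) {
		*packets += sub[i].packets;
		*bytes += sub[i].bytes;
		if (sub[i].bound) {
			*packets += sub[i].hw_packets;
			*bytes += sub[i].hw_bytes;
		}
	}
}

int main(void)
{
	struct flow_entry sub[2] = {
		{ .bound = true, .hw_packets = 10, .hw_bytes = 1000 },
		{ .bound = true, .hw_packets = 5,  .hw_bytes = 500  },
	};
	uint64_t packets, bytes;

	flow_expire(&sub[1]); /* second subflow ages out of the PPE */
	flow_get_stats(sub, 2, &packets, &bytes);
	printf("packets=%" PRIu64 " bytes=%" PRIu64 "\n", packets, bytes);
	return 0;
}

Compiled with any C99 compiler, this prints packets=15 bytes=1500: the reading of the expired subflow survives in its software totals instead of being lost, which is exactly the bug 736-04 fixes for L2 flows.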
diff --git a/target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch b/target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
new file mode 100644
index 0000000000..f43d15d114
--- /dev/null
+++ b/target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
@@ -0,0 +1,314 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 23 Mar 2023 10:24:11 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
+ offloaded flows
+
+Unify tracking of L2 and L3 flows. Use the generic list field in struct
+mtk_flow_entry for tracking L2 subflows. Preparation for improving
+flow accounting support.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -466,26 +466,30 @@ int mtk_foe_entry_set_queue(struct mtk_e
+ 	return 0;
+ }
+ 
++static int
++mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
++{
++	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
++
++	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
++		return offsetof(struct mtk_foe_entry, ipv6._rsv);
++	else
++		return offsetof(struct mtk_foe_entry, ipv4.ib2);
++}
++
+ static bool
+ mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+-		     struct mtk_foe_entry *data)
++		     struct mtk_foe_entry *data, int len)
+ {
+-	int type, len;
+-
+ 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ 		return false;
+ 
+-	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
+-	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+-		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+-	else
+-		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+-
+ 	return !memcmp(&entry->data.data, &data->data, len - 4);
+ }
+ 
+ static void
+-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++		      bool set_state)
+ {
+ 	struct hlist_head *head;
+ 	struct hlist_node *tmp;
+@@ -495,13 +499,12 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ 				       mtk_flow_l2_ht_params);
+ 
+ 		head = &entry->l2_flows;
+-		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+-			__mtk_foe_entry_clear(ppe, entry);
++		hlist_for_each_entry_safe(entry, tmp, head, list)
++			__mtk_foe_entry_clear(ppe, entry, set_state);
+ 		return;
+ 	}
+ 
+-	hlist_del_init(&entry->list);
+-	if (entry->hash != 0xffff) {
++	if (entry->hash != 0xffff && set_state) {
+ 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+ 
+ 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+@@ -520,7 +523,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ 	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ 		return;
+ 
+-	hlist_del_init(&entry->l2_data.list);
++	hlist_del_init(&entry->list);
+ 	kfree(entry);
+ }
+ 
+@@ -536,66 +539,55 @@ static int __mtk_foe_entry_idle_time(str
+ 	return now - timestamp;
+ }
+ 
++static bool
++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++{
++	struct mtk_foe_entry foe = {};
++	struct mtk_foe_entry *hwe;
++	u16 hash = entry->hash;
++	int len;
++
++	if (hash == 0xffff)
++		return false;
++
++	hwe = mtk_foe_get_entry(ppe, hash);
++	len = mtk_flow_entry_match_len(ppe->eth, &entry->data);
++	memcpy(&foe, hwe, len);
++
++	if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
++	    FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
++		return false;
++
++	entry->data.ib1 = foe.ib1;
++
++	return true;
++}
++
+ static void
+ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+ 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ 	struct mtk_flow_entry *cur;
+-	struct mtk_foe_entry *hwe;
+ 	struct hlist_node *tmp;
+ 	int idle;
+ 
+ 	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+-	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
++	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, list) {
+ 		int cur_idle;
+-		u32 ib1;
+-
+-		hwe = mtk_foe_get_entry(ppe, cur->hash);
+-		ib1 = READ_ONCE(hwe->ib1);
+ 
+-		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+-			cur->hash = 0xffff;
+-			__mtk_foe_entry_clear(ppe, cur);
++		if (!mtk_flow_entry_update(ppe, cur)) {
++			__mtk_foe_entry_clear(ppe, entry, false);
+ 			continue;
+ 		}
+ 
+-		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
++		cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1);
+ 		if (cur_idle >= idle)
+ 			continue;
+ 
+ 		idle = cur_idle;
+ 		entry->data.ib1 &= ~ib1_ts_mask;
+-		entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+-	}
+-}
+-
+-static void
+-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+-{
+-	struct mtk_foe_entry foe = {};
+-	struct mtk_foe_entry *hwe;
+-
+-	spin_lock_bh(&ppe_lock);
+-
+-	if (entry->type == MTK_FLOW_TYPE_L2) {
+-		mtk_flow_entry_update_l2(ppe, entry);
+-		goto out;
++		entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask;
+ 	}
+-
+-	if (entry->hash == 0xffff)
+-		goto out;
+-
+-	hwe = mtk_foe_get_entry(ppe, entry->hash);
+-	memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+-	if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
+-		entry->hash = 0xffff;
+-		goto out;
+-	}
+-
+-	entry->data.ib1 = foe.ib1;
+-
+-out:
+-	spin_unlock_bh(&ppe_lock);
+ }
+ 
+ static void
+@@ -632,7 +624,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+ 	spin_lock_bh(&ppe_lock);
+-	__mtk_foe_entry_clear(ppe, entry);
++	__mtk_foe_entry_clear(ppe, entry, true);
++	hlist_del_init(&entry->list);
+ 	spin_unlock_bh(&ppe_lock);
+ }
+ 
+@@ -679,8 +672,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ {
+ 	const struct mtk_soc_data *soc = ppe->eth->soc;
+ 	struct mtk_flow_entry *flow_info;
+-	struct mtk_foe_entry foe = {}, *hwe;
+ 	struct mtk_foe_mac_info *l2;
++	struct mtk_foe_entry *hwe;
+ 	u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ 	int type;
+ 
+@@ -688,30 +681,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
+ 	if (!flow_info)
+ 		return;
+ 
+-	flow_info->l2_data.base_flow = entry;
+ 	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ 	flow_info->hash = hash;
+ 	hlist_add_head(&flow_info->list,
+ 		       &ppe->foe_flow[hash / soc->hash_offset]);
+-	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
++	hlist_add_head(&flow_info->list, &entry->l2_flows);
+ 
+ 	hwe = mtk_foe_get_entry(ppe, hash);
+-	memcpy(&foe, hwe, soc->foe_entry_size);
+-	foe.ib1 &= ib1_mask;
+-	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
++	memcpy(&flow_info->data, hwe, soc->foe_entry_size);
++	flow_info->data.ib1 &= ib1_mask;
++	flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask;
+ 
+-	l2 = mtk_foe_entry_l2(ppe->eth, &foe);
++	l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data);
+ 	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+ 
+-	type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
++	type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1);
+ 	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+-		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
++		memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig,
++		       sizeof(flow_info->data.ipv4.new));
+ 	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ 		l2->etype = ETH_P_IPV6;
+ 
+-	*mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
++	*mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2;
+ 
+-	__mtk_foe_entry_commit(ppe, &foe, hash);
++	__mtk_foe_entry_commit(ppe, &flow_info->data, hash);
+ }
+ 
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+@@ -721,9 +714,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+ 	struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
+ 	struct mtk_flow_entry *entry;
+ 	struct mtk_foe_bridge key = {};
++	struct mtk_foe_entry foe = {};
+ 	struct hlist_node *n;
+ 	struct ethhdr *eh;
+ 	bool found = false;
++	int entry_len;
+ 	u8 *tag;
+ 
+ 	spin_lock_bh(&ppe_lock);
+@@ -731,20 +726,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+ 	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ 		goto out;
+ 
+-	hlist_for_each_entry_safe(entry, n, head, list) {
+-		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+-			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+-				     MTK_FOE_STATE_BIND))
+-				continue;
+-
+-			entry->hash = 0xffff;
+-			__mtk_foe_entry_clear(ppe, entry);
+-			continue;
+-		}
++	entry_len = mtk_flow_entry_match_len(ppe->eth, hwe);
++	memcpy(&foe, hwe, entry_len);
+ 
+-		if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
++	hlist_for_each_entry_safe(entry, n, head, list) {
++		if (found ||
++		    !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) {
+ 			if (entry->hash != 0xffff)
+-				entry->hash = 0xffff;
++				__mtk_foe_entry_clear(ppe, entry, false);
+ 			continue;
+ 		}
+ 
+@@ -795,9 +784,17 @@ out:
+ 
+ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+ {
+-	mtk_flow_entry_update(ppe, entry);
++	int idle;
++
++	spin_lock_bh(&ppe_lock);
++	if (entry->type == MTK_FLOW_TYPE_L2)
++		mtk_flow_entry_update_l2(ppe, entry);
++	else
++		mtk_flow_entry_update(ppe, entry);
++	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
++	spin_unlock_bh(&ppe_lock);
+ 
+-	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
++	return idle;
+ }
+ 
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -275,13 +275,7 @@ struct mtk_flow_entry {
+ 	s8 wed_index;
+ 	u8 ppe_index;
+ 	u16 hash;
+-	union {
+-		struct mtk_foe_entry data;
+-		struct {
+-			struct mtk_flow_entry *base_flow;
+-			struct hlist_node list;
+-		} l2_data;
+-	};
++	struct mtk_foe_entry data;
+ 	struct rhash_head node;
+ 	unsigned long cookie;
+ };
diff --git a/target/linux/generic/pending-5.15/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch b/target/linux/generic/pending-5.15/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
new file mode 100644
index 0000000000..888ecc9b61
--- /dev/null
+++ b/target/linux/generic/pending-5.15/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
@@ -0,0 +1,320 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 23 Mar 2023 11:05:22 +0100
+Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2
+ flows
+
+For L2 flows, the packet/byte counters should report the sum of the
+counters of their subflows, both current and expired.
+In order to make this work, change the way that accounting data is tracked.
+Reset counters when a flow enters bind. Once it expires (or enters unbind),
+store the last counter value in struct mtk_flow_entry.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
+ 	int ret;
+ 	u32 val;
+ 
+-	ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
+-				 !(val & MTK_PPE_MIB_SER_CR_ST),
+-				 20, MTK_PPE_WAIT_TIMEOUT_US);
++	ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val,
++					!(val & MTK_PPE_MIB_SER_CR_ST),
++					20, MTK_PPE_WAIT_TIMEOUT_US);
+ 
+ 	if (ret)
+ 		dev_err(ppe->dev, "MIB table busy");
+@@ -90,18 +90,32 @@ static int mtk_ppe_mib_wait_busy(struct
+ 	return ret;
+ }
+ 
+-static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
++static inline struct mtk_foe_accounting *
++mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index)
++{
++	if (!ppe->acct_table)
++		return NULL;
++
++	return ppe->acct_table + index * sizeof(struct mtk_foe_accounting);
++}
++
++struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index)
+ {
+ 	u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
+ 	u32 val, cnt_r0, cnt_r1, cnt_r2;
++	struct mtk_foe_accounting *acct;
+ 	int ret;
+ 
+ 	val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
+ 	ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
+ 
++	acct = mtk_ppe_acct_data(ppe, index);
++	if (!acct)
++		return NULL;
++
+ 	ret = mtk_ppe_mib_wait_busy(ppe);
+ 	if (ret)
+-		return ret;
++		return acct;
+ 
+ 	cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
+ 	cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
+@@ -111,10 +125,11 @@ static int mtk_mib_entry_read(struct mtk
+ 	byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+ 	pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+ 	pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+-	*bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+-	*packets = (pkt_cnt_high << 16) | pkt_cnt_low;
+ 
+-	return 0;
++	acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low;
++	acct->packets += (pkt_cnt_high << 16) | pkt_cnt_low;
++
++	return acct;
+ }
+ 
+ static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
+@@ -510,13 +525,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ 		dma_wmb();
+-		if (ppe->accounting) {
+-			struct mtk_foe_accounting *acct;
+-
+-			acct = ppe->acct_table + entry->hash * sizeof(*acct);
+-			acct->packets = 0;
+-			acct->bytes = 0;
+-		}
+ 	}
+ 	entry->hash = 0xffff;
+ 
+@@ -540,8 +548,10 @@ static int __mtk_foe_entry_idle_time(str
+ }
+ 
+ static bool
+-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++		      u64 *packets, u64 *bytes)
+ {
++	struct mtk_foe_accounting *acct;
+ 	struct mtk_foe_entry foe = {};
+ 	struct mtk_foe_entry *hwe;
+ 	u16 hash = entry->hash;
+@@ -555,16 +565,29 @@ mtk_flow_entry_update(struct mtk_ppe *pp
+ 	memcpy(&foe, hwe, len);
+ 
+ 	if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
+-	    FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND)
++	    FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) {
++		acct = mtk_ppe_acct_data(ppe, hash);
++		if (acct) {
++			entry->packets += acct->packets;
++			entry->bytes += acct->bytes;
++		}
++
+ 		return false;
++	}
+ 
+ 	entry->data.ib1 = foe.ib1;
++	acct = mtk_ppe_mib_entry_read(ppe, hash);
++	if (acct) {
++		*packets += acct->packets;
++		*bytes += acct->bytes;
++	}
+ 
+ 	return true;
+ }
+ 
+ static void
+-mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
++mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++			 u64 *packets, u64 *bytes)
+ {
+ 	u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ 	struct mtk_flow_entry *cur;
+@@ -575,7 +598,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ 	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, list) {
+ 		int cur_idle;
+ 
+-		if (!mtk_flow_entry_update(ppe, cur)) {
++		if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) {
++			entry->packets += cur->packets;
++			entry->bytes += cur->bytes;
+ 			__mtk_foe_entry_clear(ppe, entry, false);
+ 			continue;
+ 		}
+@@ -590,10 +615,31 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+ 	}
+ }
+ 
++void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++			     int *idle, u64 *packets, u64 *bytes)
++{
++	*packets = 0;
++	*bytes = 0;
++
++	spin_lock_bh(&ppe_lock);
++
++	if (entry->type == MTK_FLOW_TYPE_L2)
++		mtk_flow_entry_update_l2(ppe, entry, packets, bytes);
++	else
++		mtk_flow_entry_update(ppe, entry, packets, bytes);
++
++	*packets += entry->packets;
++	*bytes += entry->bytes;
++	*idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
++
++	spin_unlock_bh(&ppe_lock);
++}
++
+ static void
+ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ 		       u16 hash)
+ {
++	struct mtk_foe_accounting *acct;
+ 	struct mtk_eth *eth = ppe->eth;
+ 	u16 timestamp = mtk_eth_timestamp(eth);
+ 	struct mtk_foe_entry *hwe;
+@@ -618,6 +664,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+ 
+ 	dma_wmb();
+ 
++	acct = mtk_ppe_mib_entry_read(ppe, hash);
++	if (acct) {
++		acct->packets = 0;
++		acct->bytes = 0;
++	}
++
+ 	mtk_ppe_cache_clear(ppe);
+ }
+ 
+@@ -782,21 +834,6 @@ out:
+ 	spin_unlock_bh(&ppe_lock);
+ }
+ 
+-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+-{
+-	int idle;
+-
+-	spin_lock_bh(&ppe_lock);
+-	if (entry->type == MTK_FLOW_TYPE_L2)
+-		mtk_flow_entry_update_l2(ppe, entry);
+-	else
+-		mtk_flow_entry_update(ppe, entry);
+-	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+-	spin_unlock_bh(&ppe_lock);
+-
+-	return idle;
+-}
+-
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+ {
+ 	if (!ppe)
+@@ -824,32 +861,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
+ 	return mtk_ppe_wait_busy(ppe);
+ }
+ 
+-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+-						 struct mtk_foe_accounting *diff)
+-{
+-	struct mtk_foe_accounting *acct;
+-	int size = sizeof(struct mtk_foe_accounting);
+-	u64 bytes, packets;
+-
+-	if (!ppe->accounting)
+-		return NULL;
+-
+-	if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
+-		return NULL;
+-
+-	acct = ppe->acct_table + index * size;
+-
+-	acct->bytes += bytes;
+-	acct->packets += packets;
+-
+-	if (diff) {
+-		diff->bytes = bytes;
+-		diff->packets = packets;
+-	}
+-
+-	return acct;
+-}
+-
+ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
+ {
+ 	bool accounting = eth->soc->has_accounting;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -278,6 +278,8 @@ struct mtk_flow_entry {
+ 	struct mtk_foe_entry data;
+ 	struct rhash_head node;
+ 	unsigned long cookie;
++	u64 packets;
++	u64 bytes;
+ };
+ 
+ struct mtk_mib_entry {
+@@ -320,6 +322,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_
+ void mtk_ppe_start(struct mtk_ppe *ppe);
+ int mtk_ppe_stop(struct mtk_ppe *ppe);
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
++struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
+ 
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+ 
+@@ -368,9 +371,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
+ 			    unsigned int queue);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+-						 struct mtk_foe_accounting *diff);
++void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++			     int *idle, u64 *packets, u64 *bytes);
+ 
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+@@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
+ 		if (bind && state != MTK_FOE_STATE_BIND)
+ 			continue;
+ 
+-		acct = mtk_foe_entry_get_mib(ppe, i, NULL);
++		acct = mtk_ppe_mib_entry_read(ppe, i);
+ 
+ 		type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ 		seq_printf(m, "%05x %s %7s", i,
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -499,24 +499,17 @@ static int
+ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+ {
+ 	struct mtk_flow_entry *entry;
+-	struct mtk_foe_accounting diff;
+-	u32 idle;
++	int idle;
+ 
+ 	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ 				  mtk_flow_ht_params);
+ 	if (!entry)
+ 		return -ENOENT;
+ 
+-	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
++	mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle,
++				&f->stats.pkts, &f->stats.bytes);
+ 	f->stats.lastused = jiffies - idle * HZ;
+ 
+-	if (entry->hash != 0xFFFF &&
+-	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
+-				  &diff)) {
+-		f->stats.pkts += diff.packets;
+-		f->stats.bytes += diff.bytes;
+-	}
+-
+ 	return 0;
+ }
+ 
diff --git a/target/linux/generic/pending-5.15/736-05-net-ethernet-mtk_eth_soc-add-missing-ppe-cache-flush.patch b/target/linux/generic/pending-5.15/736-05-net-ethernet-mtk_eth_soc-add-missing-ppe-cache-flush.patch
new file mode 100644
index 0000000000..0b876acacf
--- /dev/null
+++ b/target/linux/generic/pending-5.15/736-05-net-ethernet-mtk_eth_soc-add-missing-ppe-cache-flush.patch
@@ -0,0 +1,21 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 23 Mar 2023 11:19:14 +0100
+Subject: [PATCH] net: ethernet: mtk_eth_soc: add missing ppe cache flush when
+ deleting a flow
+
+The cache needs to be flushed to ensure that the hardware stops offloading
+the flow immediately.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -525,6 +525,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+ 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ 		hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ 		dma_wmb();
++		mtk_ppe_cache_clear(ppe);
+ 	}
+ 	entry->hash = 0xffff;
+ 
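
For reference, mtk_ppe_mib_entry_read() in 736-04 assembles each counter from fields spread over the three MIB_SER result registers. Below is a minimal standalone sketch of that assembly with made-up register values; the actual field widths come from the MTK_PPE_MIB_SER_R* masks and are not reproduced here.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* stand-ins for the three register reads (values made up) */
	uint32_t byte_cnt_low  = 0x89abcdefu; /* R0: byte count, low 32 bits */
	uint32_t byte_cnt_high = 0x00001234u; /* field of R1: byte count, high bits */
	uint32_t pkt_cnt_low   = 0xcdefu;     /* field of R1: packet count, low 16 bits */
	uint32_t pkt_cnt_high  = 0x0012u;     /* field of R2: packet count, high bits */

	/* the u64 cast keeps the high byte-count half from being shifted out */
	uint64_t bytes = ((uint64_t)byte_cnt_high << 32) | byte_cnt_low;
	/* the packet count is combined in 32-bit arithmetic, as in the patch,
	 * so the high field used here is kept within what a u32 shift allows */
	uint64_t packets = (pkt_cnt_high << 16) | pkt_cnt_low;

	/* prints bytes=0x123489abcdef packets=0x12cdef */
	printf("bytes=0x%" PRIx64 " packets=0x%" PRIx64 "\n", bytes, packets);
	return 0;
}

Because the read is destructive on the hardware side only at bind time (the patch zeroes the cached totals in __mtk_foe_entry_commit() instead of on clear), mtk_ppe_mib_entry_read() accumulates into the per-entry acct_table slot rather than returning a raw delta, which is what lets expired subflow counters survive until the parent flow sums them.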