From: Mieczyslaw Nalewaj
Date: Mon, 17 Mar 2025 07:33:33 +0000 (+0100)
Subject: generic: move backport patches 751-03 and 751-04 to pending
X-Git-Url: http://git.openwrt.org/?a=commitdiff_plain;h=27e25d3401cc87e42ade0215df5bd3b17ef2d83f;p=openwrt%2Fstaging%2Fpepe2k.git

generic: move backport patches 751-03 and 751-04 to pending

As a result of commit 6407ef8d2bcb4a0a6284de09cd77bd1868c1d6ea, patches
751-03 and 751-04 were incorrectly placed in the backport folder. Move
them back to their proper place in the pending folder.

Signed-off-by: Mieczyslaw Nalewaj
Link: https://github.com/openwrt/openwrt/pull/18253
Signed-off-by: Hauke Mehrtens
---

diff --git a/target/linux/generic/backport-6.6/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch b/target/linux/generic/backport-6.6/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
deleted file mode 100644
index 2689712228..0000000000
--- a/target/linux/generic/backport-6.6/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
+++ /dev/null
@@ -1,334 +0,0 @@
-From: Felix Fietkau
-Date: Thu, 23 Mar 2023 10:24:11 +0100
-Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of
- offloaded flows
-
-Unify tracking of L2 and L3 flows. Use the generic list field in struct
-mtk_foe_entry for tracking L2 subflows. Preparation for improving
-flow accounting support.
-
-Signed-off-by: Felix Fietkau
----
- drivers/net/ethernet/mediatek/mtk_ppe.c | 162 ++++++++++++------------
- drivers/net/ethernet/mediatek/mtk_ppe.h | 15 +--
- 2 files changed, 86 insertions(+), 91 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_ppe.c
-+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -477,42 +477,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
- 	return 0;
- }
- 
-+static int
-+mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry)
-+{
-+	int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
-+
-+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
-+		return offsetof(struct mtk_foe_entry, ipv6._rsv);
-+	else
-+		return offsetof(struct mtk_foe_entry, ipv4.ib2);
-+}
-+
- static bool
- mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
--		     struct mtk_foe_entry *data)
-+		     struct mtk_foe_entry *data, int len)
- {
--	int type, len;
--
- 	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
- 		return false;
- 
--	type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
--	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
--		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
--	else
--		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
--
- 	return !memcmp(&entry->data.data, &data->data, len - 4);
- }
- 
- static void
--__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
-+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
-+		      bool set_state)
- {
--	struct hlist_head *head;
- 	struct hlist_node *tmp;
- 
- 	if (entry->type == MTK_FLOW_TYPE_L2) {
- 		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
- 				       mtk_flow_l2_ht_params);
- 
--		head = &entry->l2_flows;
--		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
--			__mtk_foe_entry_clear(ppe, entry);
-+		hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list)
-+			__mtk_foe_entry_clear(ppe, entry, set_state);
- 		return;
- 	}
- 
--	hlist_del_init(&entry->list);
--	if (entry->hash != 0xffff) {
-+	if (entry->hash != 0xffff && set_state) {
- 		struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
- 
- 		hwe->ib1 &= ~MTK_FOE_IB1_STATE;
-@@ -533,7 +534,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
- 	if (entry->type != 
MTK_FLOW_TYPE_L2_SUBFLOW) - return; - -- hlist_del_init(&entry->l2_data.list); -+ hlist_del_init(&entry->l2_list); -+ hlist_del_init(&entry->list); - kfree(entry); - } - -@@ -549,66 +551,55 @@ static int __mtk_foe_entry_idle_time(str - return now - timestamp; - } - -+static bool -+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+{ -+ struct mtk_foe_entry foe = {}; -+ struct mtk_foe_entry *hwe; -+ u16 hash = entry->hash; -+ int len; -+ -+ if (hash == 0xffff) -+ return false; -+ -+ hwe = mtk_foe_get_entry(ppe, hash); -+ len = mtk_flow_entry_match_len(ppe->eth, &entry->data); -+ memcpy(&foe, hwe, len); -+ -+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) || -+ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) -+ return false; -+ -+ entry->data.ib1 = foe.ib1; -+ -+ return true; -+} -+ - static void - mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); - struct mtk_flow_entry *cur; -- struct mtk_foe_entry *hwe; - struct hlist_node *tmp; - int idle; - - idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); -- hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { -+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) { - int cur_idle; -- u32 ib1; -- -- hwe = mtk_foe_get_entry(ppe, cur->hash); -- ib1 = READ_ONCE(hwe->ib1); - -- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { -- cur->hash = 0xffff; -- __mtk_foe_entry_clear(ppe, cur); -+ if (!mtk_flow_entry_update(ppe, cur)) { -+ __mtk_foe_entry_clear(ppe, entry, false); - continue; - } - -- cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); -+ cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1); - if (cur_idle >= idle) - continue; - - idle = cur_idle; - entry->data.ib1 &= ~ib1_ts_mask; -- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask; -- } --} -- --static void --mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) --{ -- struct mtk_foe_entry foe = {}; -- struct mtk_foe_entry *hwe; -- -- spin_lock_bh(&ppe_lock); -- -- if (entry->type == MTK_FLOW_TYPE_L2) { -- mtk_flow_entry_update_l2(ppe, entry); -- goto out; -+ entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask; - } -- -- if (entry->hash == 0xffff) -- goto out; -- -- hwe = mtk_foe_get_entry(ppe, entry->hash); -- memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size); -- if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) { -- entry->hash = 0xffff; -- goto out; -- } -- -- entry->data.ib1 = foe.ib1; -- --out: -- spin_unlock_bh(&ppe_lock); - } - - static void -@@ -651,7 +642,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p - void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - spin_lock_bh(&ppe_lock); -- __mtk_foe_entry_clear(ppe, entry); -+ __mtk_foe_entry_clear(ppe, entry, true); -+ hlist_del_init(&entry->list); - spin_unlock_bh(&ppe_lock); - } - -@@ -698,8 +690,8 @@ mtk_foe_entry_commit_subflow(struct mtk_ - { - const struct mtk_soc_data *soc = ppe->eth->soc; - struct mtk_flow_entry *flow_info; -- struct mtk_foe_entry foe = {}, *hwe; - struct mtk_foe_mac_info *l2; -+ struct mtk_foe_entry *hwe; - u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP; - int type; - -@@ -707,30 +699,30 @@ mtk_foe_entry_commit_subflow(struct mtk_ - if (!flow_info) - return; - -- flow_info->l2_data.base_flow = entry; - flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; - flow_info->hash = hash; - hlist_add_head(&flow_info->list, - &ppe->foe_flow[hash / soc->hash_offset]); -- hlist_add_head(&flow_info->l2_data.list, 
&entry->l2_flows); -+ hlist_add_head(&flow_info->l2_list, &entry->l2_flows); - - hwe = mtk_foe_get_entry(ppe, hash); -- memcpy(&foe, hwe, soc->foe_entry_size); -- foe.ib1 &= ib1_mask; -- foe.ib1 |= entry->data.ib1 & ~ib1_mask; -+ memcpy(&flow_info->data, hwe, soc->foe_entry_size); -+ flow_info->data.ib1 &= ib1_mask; -+ flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask; - -- l2 = mtk_foe_entry_l2(ppe->eth, &foe); -+ l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data); - memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); - -- type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1); -+ type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1); - if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) -- memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); -+ memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig, -+ sizeof(flow_info->data.ipv4.new)); - else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) - l2->etype = ETH_P_IPV6; - -- *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2; -+ *mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2; - -- __mtk_foe_entry_commit(ppe, &foe, hash); -+ __mtk_foe_entry_commit(ppe, &flow_info->data, hash); - } - - void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) -@@ -740,9 +732,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe - struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash); - struct mtk_flow_entry *entry; - struct mtk_foe_bridge key = {}; -+ struct mtk_foe_entry foe = {}; - struct hlist_node *n; - struct ethhdr *eh; - bool found = false; -+ int entry_len; - u8 *tag; - - spin_lock_bh(&ppe_lock); -@@ -750,20 +744,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe - if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) - goto out; - -- hlist_for_each_entry_safe(entry, n, head, list) { -- if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { -- if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == -- MTK_FOE_STATE_BIND)) -- continue; -- -- entry->hash = 0xffff; -- __mtk_foe_entry_clear(ppe, entry); -- continue; -- } -+ entry_len = mtk_flow_entry_match_len(ppe->eth, hwe); -+ memcpy(&foe, hwe, entry_len); - -- if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) { -+ hlist_for_each_entry_safe(entry, n, head, list) { -+ if (found || -+ !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) { - if (entry->hash != 0xffff) -- entry->hash = 0xffff; -+ __mtk_foe_entry_clear(ppe, entry, false); - continue; - } - -@@ -814,9 +802,17 @@ out: - - int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { -- mtk_flow_entry_update(ppe, entry); -+ int idle; -+ -+ spin_lock_bh(&ppe_lock); -+ if (entry->type == MTK_FLOW_TYPE_L2) -+ mtk_flow_entry_update_l2(ppe, entry); -+ else -+ mtk_flow_entry_update(ppe, entry); -+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); -+ spin_unlock_bh(&ppe_lock); - -- return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); -+ return idle; - } - - int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -286,7 +286,12 @@ enum { - - struct mtk_flow_entry { - union { -- struct hlist_node list; -+ /* regular flows + L2 subflows */ -+ struct { -+ struct hlist_node list; -+ struct hlist_node l2_list; -+ }; -+ /* L2 flows */ - struct { - struct rhash_head l2_node; - struct hlist_head l2_flows; -@@ -296,13 +301,7 @@ struct mtk_flow_entry { - s8 wed_index; - u8 ppe_index; - u16 hash; -- union { -- struct mtk_foe_entry data; -- struct { -- struct mtk_flow_entry 
*base_flow; -- struct hlist_node list; -- } l2_data; -- }; -+ struct mtk_foe_entry data; - struct rhash_head node; - unsigned long cookie; - }; diff --git a/target/linux/generic/backport-6.6/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch b/target/linux/generic/backport-6.6/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch deleted file mode 100644 index a93f80ac79..0000000000 --- a/target/linux/generic/backport-6.6/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch +++ /dev/null @@ -1,343 +0,0 @@ -From: Felix Fietkau -Date: Thu, 23 Mar 2023 11:05:22 +0100 -Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2 - flows - -For L2 flows, the packet/byte counters should report the sum of the -counters of their subflows, both current and expired. -In order to make this work, change the way that accounting data is tracked. -Reset counters when a flow enters bind. Once it expires (or enters unbind), -store the last counter value in struct mtk_flow_entry. - -Signed-off-by: Felix Fietkau ---- - ---- a/drivers/net/ethernet/mediatek/mtk_ppe.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct - int ret; - u32 val; - -- ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val, -- !(val & MTK_PPE_MIB_SER_CR_ST), -- 20, MTK_PPE_WAIT_TIMEOUT_US); -+ ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val, -+ !(val & MTK_PPE_MIB_SER_CR_ST), -+ 20, MTK_PPE_WAIT_TIMEOUT_US); - - if (ret) - dev_err(ppe->dev, "MIB table busy"); -@@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct - return ret; - } - --static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets) -+static inline struct mtk_foe_accounting * -+mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index) -+{ -+ if (!ppe->acct_table) -+ return NULL; -+ -+ return ppe->acct_table + index * sizeof(struct mtk_foe_accounting); -+} -+ -+struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index) - { - u32 val, cnt_r0, cnt_r1, cnt_r2; -+ struct mtk_foe_accounting *acct; - int ret; - - val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST; - ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val); - -+ acct = mtk_ppe_acct_data(ppe, index); -+ if (!acct) -+ return NULL; -+ - ret = mtk_ppe_mib_wait_busy(ppe); - if (ret) -- return ret; -+ return acct; - - cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0); - cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1); -@@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk - if (mtk_is_netsys_v3_or_greater(ppe->eth)) { - /* 64 bit for each counter */ - u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3); -- *bytes = ((u64)cnt_r1 << 32) | cnt_r0; -- *packets = ((u64)cnt_r3 << 32) | cnt_r2; -+ acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0; -+ acct->packets += ((u64)cnt_r3 << 32) | cnt_r2; - } else { - /* 48 bit byte counter, 40 bit packet counter */ - u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0); - u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1); - u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1); - u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2); -- *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low; -- *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low; -+ acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low; -+ acct->packets += ((u64)pkt_cnt_high << 16) | pkt_cnt_low; - } - -- return 0; -+ return acct; - } - - static void 
mtk_ppe_cache_clear(struct mtk_ppe *ppe) -@@ -520,14 +534,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp - hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID); - dma_wmb(); - mtk_ppe_cache_clear(ppe); -- -- if (ppe->accounting) { -- struct mtk_foe_accounting *acct; -- -- acct = ppe->acct_table + entry->hash * sizeof(*acct); -- acct->packets = 0; -- acct->bytes = 0; -- } - } - entry->hash = 0xffff; - -@@ -552,11 +558,14 @@ static int __mtk_foe_entry_idle_time(str - } - - static bool --mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) -+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, -+ u64 *packets, u64 *bytes) - { -+ struct mtk_foe_accounting *acct; - struct mtk_foe_entry foe = {}; - struct mtk_foe_entry *hwe; - u16 hash = entry->hash; -+ bool ret = false; - int len; - - if (hash == 0xffff) -@@ -567,18 +576,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp - memcpy(&foe, hwe, len); - - if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) || -- FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) -- return false; -+ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) { -+ acct = mtk_ppe_acct_data(ppe, hash); -+ if (acct) { -+ entry->prev_packets += acct->packets; -+ entry->prev_bytes += acct->bytes; -+ } -+ -+ goto out; -+ } - - entry->data.ib1 = foe.ib1; -+ acct = mtk_ppe_mib_entry_read(ppe, hash); -+ ret = true; -+ -+out: -+ if (acct) { -+ *packets += acct->packets; -+ *bytes += acct->bytes; -+ } - -- return true; -+ return ret; - } - - static void - mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) - { - u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); -+ u64 *packets = &entry->packets; -+ u64 *bytes = &entry->bytes; - struct mtk_flow_entry *cur; - struct hlist_node *tmp; - int idle; -@@ -587,7 +613,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe - hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) { - int cur_idle; - -- if (!mtk_flow_entry_update(ppe, cur)) { -+ if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) { -+ entry->prev_packets += cur->prev_packets; -+ entry->prev_bytes += cur->prev_bytes; - __mtk_foe_entry_clear(ppe, entry, false); - continue; - } -@@ -602,10 +630,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe - } - } - -+void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, -+ int *idle) -+{ -+ entry->packets = entry->prev_packets; -+ entry->bytes = entry->prev_bytes; -+ -+ spin_lock_bh(&ppe_lock); -+ -+ if (entry->type == MTK_FLOW_TYPE_L2) -+ mtk_flow_entry_update_l2(ppe, entry); -+ else -+ mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes); -+ -+ *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); -+ -+ spin_unlock_bh(&ppe_lock); -+} -+ - static void - __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, - u16 hash) - { -+ struct mtk_foe_accounting *acct; - struct mtk_eth *eth = ppe->eth; - u16 timestamp = mtk_eth_timestamp(eth); - struct mtk_foe_entry *hwe; -@@ -636,6 +683,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p - - dma_wmb(); - -+ acct = mtk_ppe_mib_entry_read(ppe, hash); -+ if (acct) { -+ acct->packets = 0; -+ acct->bytes = 0; -+ } -+ - mtk_ppe_cache_clear(ppe); - } - -@@ -800,21 +853,6 @@ out: - spin_unlock_bh(&ppe_lock); - } - --int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) --{ -- int idle; -- -- spin_lock_bh(&ppe_lock); -- if (entry->type == MTK_FLOW_TYPE_L2) -- mtk_flow_entry_update_l2(ppe, entry); -- else -- mtk_flow_entry_update(ppe, entry); -- idle = 
__mtk_foe_entry_idle_time(ppe, entry->data.ib1); -- spin_unlock_bh(&ppe_lock); -- -- return idle; --} -- - int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) - { - if (!ppe) -@@ -842,32 +880,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe - return mtk_ppe_wait_busy(ppe); - } - --struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, -- struct mtk_foe_accounting *diff) --{ -- struct mtk_foe_accounting *acct; -- int size = sizeof(struct mtk_foe_accounting); -- u64 bytes, packets; -- -- if (!ppe->accounting) -- return NULL; -- -- if (mtk_mib_entry_read(ppe, index, &bytes, &packets)) -- return NULL; -- -- acct = ppe->acct_table + index * size; -- -- acct->bytes += bytes; -- acct->packets += packets; -- -- if (diff) { -- diff->bytes = bytes; -- diff->packets = packets; -- } -- -- return acct; --} -- - struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index) - { - bool accounting = eth->soc->has_accounting; ---- a/drivers/net/ethernet/mediatek/mtk_ppe.h -+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h -@@ -304,6 +304,8 @@ struct mtk_flow_entry { - struct mtk_foe_entry data; - struct rhash_head node; - unsigned long cookie; -+ u64 prev_packets, prev_bytes; -+ u64 packets, bytes; - }; - - struct mtk_mib_entry { -@@ -348,6 +350,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth) - void mtk_ppe_start(struct mtk_ppe *ppe); - int mtk_ppe_stop(struct mtk_ppe *ppe); - int mtk_ppe_prepare_reset(struct mtk_ppe *ppe); -+struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index); - - void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); - -@@ -396,9 +399,8 @@ int mtk_foe_entry_set_queue(struct mtk_e - unsigned int queue); - int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); --int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); - int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index); --struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, -- struct mtk_foe_accounting *diff); -+void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, -+ int *idle); - - #endif ---- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c -@@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file - if (bind && state != MTK_FOE_STATE_BIND) - continue; - -- acct = mtk_foe_entry_get_mib(ppe, i, NULL); -+ acct = mtk_ppe_mib_entry_read(ppe, i); - - type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1); - seq_printf(m, "%05x %s %7s", i, ---- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c -@@ -501,24 +501,21 @@ static int - mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) - { - struct mtk_flow_entry *entry; -- struct mtk_foe_accounting diff; -- u32 idle; -+ u64 packets, bytes; -+ int idle; - - entry = rhashtable_lookup(ð->flow_table, &f->cookie, - mtk_flow_ht_params); - if (!entry) - return -ENOENT; - -- idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry); -+ packets = entry->packets; -+ bytes = entry->bytes; -+ mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle); -+ f->stats.pkts += entry->packets - packets; -+ f->stats.bytes += entry->bytes - bytes; - f->stats.lastused = jiffies - idle * HZ; - -- if (entry->hash != 0xFFFF && -- mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash, -- &diff)) { -- 
f->stats.pkts += diff.packets; -- f->stats.bytes += diff.bytes; -- } -- - return 0; - } - diff --git a/target/linux/generic/backport-6.6/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch b/target/linux/generic/backport-6.6/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch index 4e72ea128a..cc4baecdfb 100644 --- a/target/linux/generic/backport-6.6/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch +++ b/target/linux/generic/backport-6.6/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch @@ -15,7 +15,7 @@ Signed-off-by: Paolo Abeni --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c -@@ -439,7 +439,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e +@@ -425,7 +425,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e } int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, @@ -25,7 +25,7 @@ Signed-off-by: Paolo Abeni { struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); u32 *ib2 = mtk_foe_entry_ib2(eth, entry); -@@ -451,6 +452,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et +@@ -437,6 +438,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et MTK_FOE_IB2_WDMA_WINFO_V2; l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) | FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss); @@ -65,7 +65,7 @@ Signed-off-by: Paolo Abeni }; /* software-only entry type */ -@@ -394,7 +394,8 @@ int mtk_foe_entry_set_vlan(struct mtk_et +@@ -392,7 +392,8 @@ int mtk_foe_entry_set_vlan(struct mtk_et int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, int sid); int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, diff --git a/target/linux/generic/backport-6.6/752-26-v6.10-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch b/target/linux/generic/backport-6.6/752-26-v6.10-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch index 07e7e86340..789b363cef 100644 --- a/target/linux/generic/backport-6.6/752-26-v6.10-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch +++ b/target/linux/generic/backport-6.6/752-26-v6.10-net-ethernet-mtk_eth_soc-ppe-add-support-for-multipl.patch @@ -358,7 +358,7 @@ Signed-off-by: Jakub Kicinski } else { return -EOPNOTSUPP; } -@@ -630,7 +641,9 @@ int mtk_eth_setup_tc(struct net_device * +@@ -633,7 +644,9 @@ int mtk_eth_setup_tc(struct net_device * } } diff --git a/target/linux/generic/pending-6.6/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch b/target/linux/generic/pending-6.6/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch new file mode 100644 index 0000000000..632133c408 --- /dev/null +++ b/target/linux/generic/pending-6.6/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch @@ -0,0 +1,334 @@ +From: Felix Fietkau +Date: Thu, 23 Mar 2023 10:24:11 +0100 +Subject: [PATCH] net: ethernet: mtk_eth_soc: improve keeping track of + offloaded flows + +Unify tracking of L2 and L3 flows. Use the generic list field in struct +mtk_foe_entry for tracking L2 subflows. Preparation for improving +flow accounting support. 
+ +Signed-off-by: Felix Fietkau +--- + drivers/net/ethernet/mediatek/mtk_ppe.c | 162 ++++++++++++------------ + drivers/net/ethernet/mediatek/mtk_ppe.h | 15 +-- + 2 files changed, 86 insertions(+), 91 deletions(-) + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -479,42 +479,43 @@ int mtk_foe_entry_set_queue(struct mtk_e + return 0; + } + ++static int ++mtk_flow_entry_match_len(struct mtk_eth *eth, struct mtk_foe_entry *entry) ++{ ++ int type = mtk_get_ib1_pkt_type(eth, entry->ib1); ++ ++ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) ++ return offsetof(struct mtk_foe_entry, ipv6._rsv); ++ else ++ return offsetof(struct mtk_foe_entry, ipv4.ib2); ++} ++ + static bool + mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, +- struct mtk_foe_entry *data) ++ struct mtk_foe_entry *data, int len) + { +- int type, len; +- + if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) + return false; + +- type = mtk_get_ib1_pkt_type(eth, entry->data.ib1); +- if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) +- len = offsetof(struct mtk_foe_entry, ipv6._rsv); +- else +- len = offsetof(struct mtk_foe_entry, ipv4.ib2); +- + return !memcmp(&entry->data.data, &data->data, len - 4); + } + + static void +-__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, ++ bool set_state) + { +- struct hlist_head *head; + struct hlist_node *tmp; + + if (entry->type == MTK_FLOW_TYPE_L2) { + rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); + +- head = &entry->l2_flows; +- hlist_for_each_entry_safe(entry, tmp, head, l2_data.list) +- __mtk_foe_entry_clear(ppe, entry); ++ hlist_for_each_entry_safe(entry, tmp, &entry->l2_flows, l2_list) ++ __mtk_foe_entry_clear(ppe, entry, set_state); + return; + } + +- hlist_del_init(&entry->list); +- if (entry->hash != 0xffff) { ++ if (entry->hash != 0xffff && set_state) { + struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash); + + hwe->ib1 &= ~MTK_FOE_IB1_STATE; +@@ -535,7 +536,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp + if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) + return; + +- hlist_del_init(&entry->l2_data.list); ++ hlist_del_init(&entry->l2_list); ++ hlist_del_init(&entry->list); + kfree(entry); + } + +@@ -551,66 +553,55 @@ static int __mtk_foe_entry_idle_time(str + return now - timestamp; + } + ++static bool ++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++{ ++ struct mtk_foe_entry foe = {}; ++ struct mtk_foe_entry *hwe; ++ u16 hash = entry->hash; ++ int len; ++ ++ if (hash == 0xffff) ++ return false; ++ ++ hwe = mtk_foe_get_entry(ppe, hash); ++ len = mtk_flow_entry_match_len(ppe->eth, &entry->data); ++ memcpy(&foe, hwe, len); ++ ++ if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) || ++ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) ++ return false; ++ ++ entry->data.ib1 = foe.ib1; ++ ++ return true; ++} ++ + static void + mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); + struct mtk_flow_entry *cur; +- struct mtk_foe_entry *hwe; + struct hlist_node *tmp; + int idle; + + idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); +- hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { ++ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) { + int cur_idle; +- u32 ib1; +- +- hwe = mtk_foe_get_entry(ppe, cur->hash); +- ib1 = READ_ONCE(hwe->ib1); + 
+- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { +- cur->hash = 0xffff; +- __mtk_foe_entry_clear(ppe, cur); ++ if (!mtk_flow_entry_update(ppe, cur)) { ++ __mtk_foe_entry_clear(ppe, entry, false); + continue; + } + +- cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); ++ cur_idle = __mtk_foe_entry_idle_time(ppe, cur->data.ib1); + if (cur_idle >= idle) + continue; + + idle = cur_idle; + entry->data.ib1 &= ~ib1_ts_mask; +- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask; +- } +-} +- +-static void +-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +-{ +- struct mtk_foe_entry foe = {}; +- struct mtk_foe_entry *hwe; +- +- spin_lock_bh(&ppe_lock); +- +- if (entry->type == MTK_FLOW_TYPE_L2) { +- mtk_flow_entry_update_l2(ppe, entry); +- goto out; ++ entry->data.ib1 |= cur->data.ib1 & ib1_ts_mask; + } +- +- if (entry->hash == 0xffff) +- goto out; +- +- hwe = mtk_foe_get_entry(ppe, entry->hash); +- memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size); +- if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) { +- entry->hash = 0xffff; +- goto out; +- } +- +- entry->data.ib1 = foe.ib1; +- +-out: +- spin_unlock_bh(&ppe_lock); + } + + static void +@@ -653,7 +644,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p + void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + spin_lock_bh(&ppe_lock); +- __mtk_foe_entry_clear(ppe, entry); ++ __mtk_foe_entry_clear(ppe, entry, true); ++ hlist_del_init(&entry->list); + spin_unlock_bh(&ppe_lock); + } + +@@ -700,8 +692,8 @@ mtk_foe_entry_commit_subflow(struct mtk_ + { + const struct mtk_soc_data *soc = ppe->eth->soc; + struct mtk_flow_entry *flow_info; +- struct mtk_foe_entry foe = {}, *hwe; + struct mtk_foe_mac_info *l2; ++ struct mtk_foe_entry *hwe; + u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP; + int type; + +@@ -709,30 +701,30 @@ mtk_foe_entry_commit_subflow(struct mtk_ + if (!flow_info) + return; + +- flow_info->l2_data.base_flow = entry; + flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; + flow_info->hash = hash; + hlist_add_head(&flow_info->list, + &ppe->foe_flow[hash / soc->hash_offset]); +- hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); ++ hlist_add_head(&flow_info->l2_list, &entry->l2_flows); + + hwe = mtk_foe_get_entry(ppe, hash); +- memcpy(&foe, hwe, soc->foe_entry_size); +- foe.ib1 &= ib1_mask; +- foe.ib1 |= entry->data.ib1 & ~ib1_mask; ++ memcpy(&flow_info->data, hwe, soc->foe_entry_size); ++ flow_info->data.ib1 &= ib1_mask; ++ flow_info->data.ib1 |= entry->data.ib1 & ~ib1_mask; + +- l2 = mtk_foe_entry_l2(ppe->eth, &foe); ++ l2 = mtk_foe_entry_l2(ppe->eth, &flow_info->data); + memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); + +- type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1); ++ type = mtk_get_ib1_pkt_type(ppe->eth, flow_info->data.ib1); + if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) +- memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); ++ memcpy(&flow_info->data.ipv4.new, &flow_info->data.ipv4.orig, ++ sizeof(flow_info->data.ipv4.new)); + else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) + l2->etype = ETH_P_IPV6; + +- *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2; ++ *mtk_foe_entry_ib2(ppe->eth, &flow_info->data) = entry->data.bridge.ib2; + +- __mtk_foe_entry_commit(ppe, &foe, hash); ++ __mtk_foe_entry_commit(ppe, &flow_info->data, hash); + } + + void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) +@@ -742,9 +734,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe + struct mtk_foe_entry *hwe = 
mtk_foe_get_entry(ppe, hash); + struct mtk_flow_entry *entry; + struct mtk_foe_bridge key = {}; ++ struct mtk_foe_entry foe = {}; + struct hlist_node *n; + struct ethhdr *eh; + bool found = false; ++ int entry_len; + u8 *tag; + + spin_lock_bh(&ppe_lock); +@@ -752,20 +746,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe + if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) + goto out; + +- hlist_for_each_entry_safe(entry, n, head, list) { +- if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { +- if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == +- MTK_FOE_STATE_BIND)) +- continue; +- +- entry->hash = 0xffff; +- __mtk_foe_entry_clear(ppe, entry); +- continue; +- } ++ entry_len = mtk_flow_entry_match_len(ppe->eth, hwe); ++ memcpy(&foe, hwe, entry_len); + +- if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) { ++ hlist_for_each_entry_safe(entry, n, head, list) { ++ if (found || ++ !mtk_flow_entry_match(ppe->eth, entry, &foe, entry_len)) { + if (entry->hash != 0xffff) +- entry->hash = 0xffff; ++ __mtk_foe_entry_clear(ppe, entry, false); + continue; + } + +@@ -816,9 +804,17 @@ out: + + int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { +- mtk_flow_entry_update(ppe, entry); ++ int idle; ++ ++ spin_lock_bh(&ppe_lock); ++ if (entry->type == MTK_FLOW_TYPE_L2) ++ mtk_flow_entry_update_l2(ppe, entry); ++ else ++ mtk_flow_entry_update(ppe, entry); ++ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); ++ spin_unlock_bh(&ppe_lock); + +- return __mtk_foe_entry_idle_time(ppe, entry->data.ib1); ++ return idle; + } + + int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h +@@ -286,7 +286,12 @@ enum { + + struct mtk_flow_entry { + union { +- struct hlist_node list; ++ /* regular flows + L2 subflows */ ++ struct { ++ struct hlist_node list; ++ struct hlist_node l2_list; ++ }; ++ /* L2 flows */ + struct { + struct rhash_head l2_node; + struct hlist_head l2_flows; +@@ -296,13 +301,7 @@ struct mtk_flow_entry { + s8 wed_index; + u8 ppe_index; + u16 hash; +- union { +- struct mtk_foe_entry data; +- struct { +- struct mtk_flow_entry *base_flow; +- struct hlist_node list; +- } l2_data; +- }; ++ struct mtk_foe_entry data; + struct rhash_head node; + unsigned long cookie; + }; diff --git a/target/linux/generic/pending-6.6/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch b/target/linux/generic/pending-6.6/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch new file mode 100644 index 0000000000..f80a8a4d28 --- /dev/null +++ b/target/linux/generic/pending-6.6/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch @@ -0,0 +1,343 @@ +From: Felix Fietkau +Date: Thu, 23 Mar 2023 11:05:22 +0100 +Subject: [PATCH] net: ethernet: mediatek: fix ppe flow accounting for L2 + flows + +For L2 flows, the packet/byte counters should report the sum of the +counters of their subflows, both current and expired. +In order to make this work, change the way that accounting data is tracked. +Reset counters when a flow enters bind. Once it expires (or enters unbind), +store the last counter value in struct mtk_flow_entry. 
+ +Signed-off-by: Felix Fietkau +--- + +--- a/drivers/net/ethernet/mediatek/mtk_ppe.c ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c +@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct + int ret; + u32 val; + +- ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val, +- !(val & MTK_PPE_MIB_SER_CR_ST), +- 20, MTK_PPE_WAIT_TIMEOUT_US); ++ ret = readl_poll_timeout_atomic(ppe->base + MTK_PPE_MIB_SER_CR, val, ++ !(val & MTK_PPE_MIB_SER_CR_ST), ++ 20, MTK_PPE_WAIT_TIMEOUT_US); + + if (ret) + dev_err(ppe->dev, "MIB table busy"); +@@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct + return ret; + } + +-static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets) ++static inline struct mtk_foe_accounting * ++mtk_ppe_acct_data(struct mtk_ppe *ppe, u16 index) ++{ ++ if (!ppe->acct_table) ++ return NULL; ++ ++ return ppe->acct_table + index * sizeof(struct mtk_foe_accounting); ++} ++ ++struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index) + { + u32 val, cnt_r0, cnt_r1, cnt_r2; ++ struct mtk_foe_accounting *acct; + int ret; + + val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST; + ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val); + ++ acct = mtk_ppe_acct_data(ppe, index); ++ if (!acct) ++ return NULL; ++ + ret = mtk_ppe_mib_wait_busy(ppe); + if (ret) +- return ret; ++ return acct; + + cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0); + cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1); +@@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk + if (mtk_is_netsys_v3_or_greater(ppe->eth)) { + /* 64 bit for each counter */ + u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3); +- *bytes = ((u64)cnt_r1 << 32) | cnt_r0; +- *packets = ((u64)cnt_r3 << 32) | cnt_r2; ++ acct->bytes += ((u64)cnt_r1 << 32) | cnt_r0; ++ acct->packets += ((u64)cnt_r3 << 32) | cnt_r2; + } else { + /* 48 bit byte counter, 40 bit packet counter */ + u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0); + u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1); + u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1); + u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2); +- *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low; +- *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low; ++ acct->bytes += ((u64)byte_cnt_high << 32) | byte_cnt_low; ++ acct->packets += ((u64)pkt_cnt_high << 16) | pkt_cnt_low; + } + +- return 0; ++ return acct; + } + + static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) +@@ -522,14 +536,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp + hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID); + dma_wmb(); + mtk_ppe_cache_clear(ppe); +- +- if (ppe->accounting) { +- struct mtk_foe_accounting *acct; +- +- acct = ppe->acct_table + entry->hash * sizeof(*acct); +- acct->packets = 0; +- acct->bytes = 0; +- } + } + entry->hash = 0xffff; + +@@ -554,11 +560,14 @@ static int __mtk_foe_entry_idle_time(str + } + + static bool +-mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) ++mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, ++ u64 *packets, u64 *bytes) + { ++ struct mtk_foe_accounting *acct; + struct mtk_foe_entry foe = {}; + struct mtk_foe_entry *hwe; + u16 hash = entry->hash; ++ bool ret = false; + int len; + + if (hash == 0xffff) +@@ -569,18 +578,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp + memcpy(&foe, hwe, len); + + if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) || +- FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) 
!= MTK_FOE_STATE_BIND) +- return false; ++ FIELD_GET(MTK_FOE_IB1_STATE, foe.ib1) != MTK_FOE_STATE_BIND) { ++ acct = mtk_ppe_acct_data(ppe, hash); ++ if (acct) { ++ entry->prev_packets += acct->packets; ++ entry->prev_bytes += acct->bytes; ++ } ++ ++ goto out; ++ } + + entry->data.ib1 = foe.ib1; ++ acct = mtk_ppe_mib_entry_read(ppe, hash); ++ ret = true; ++ ++out: ++ if (acct) { ++ *packets += acct->packets; ++ *bytes += acct->bytes; ++ } + +- return true; ++ return ret; + } + + static void + mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) + { + u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); ++ u64 *packets = &entry->packets; ++ u64 *bytes = &entry->bytes; + struct mtk_flow_entry *cur; + struct hlist_node *tmp; + int idle; +@@ -589,7 +615,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe + hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) { + int cur_idle; + +- if (!mtk_flow_entry_update(ppe, cur)) { ++ if (!mtk_flow_entry_update(ppe, cur, packets, bytes)) { ++ entry->prev_packets += cur->prev_packets; ++ entry->prev_bytes += cur->prev_bytes; + __mtk_foe_entry_clear(ppe, entry, false); + continue; + } +@@ -604,10 +632,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe + } + } + ++void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, ++ int *idle) ++{ ++ entry->packets = entry->prev_packets; ++ entry->bytes = entry->prev_bytes; ++ ++ spin_lock_bh(&ppe_lock); ++ ++ if (entry->type == MTK_FLOW_TYPE_L2) ++ mtk_flow_entry_update_l2(ppe, entry); ++ else ++ mtk_flow_entry_update(ppe, entry, &entry->packets, &entry->bytes); ++ ++ *idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); ++ ++ spin_unlock_bh(&ppe_lock); ++} ++ + static void + __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 hash) + { ++ struct mtk_foe_accounting *acct; + struct mtk_eth *eth = ppe->eth; + u16 timestamp = mtk_eth_timestamp(eth); + struct mtk_foe_entry *hwe; +@@ -638,6 +685,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p + + dma_wmb(); + ++ acct = mtk_ppe_mib_entry_read(ppe, hash); ++ if (acct) { ++ acct->packets = 0; ++ acct->bytes = 0; ++ } ++ + mtk_ppe_cache_clear(ppe); + } + +@@ -802,21 +855,6 @@ out: + spin_unlock_bh(&ppe_lock); + } + +-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +-{ +- int idle; +- +- spin_lock_bh(&ppe_lock); +- if (entry->type == MTK_FLOW_TYPE_L2) +- mtk_flow_entry_update_l2(ppe, entry); +- else +- mtk_flow_entry_update(ppe, entry); +- idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); +- spin_unlock_bh(&ppe_lock); +- +- return idle; +-} +- + int mtk_ppe_prepare_reset(struct mtk_ppe *ppe) + { + if (!ppe) +@@ -844,32 +882,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe + return mtk_ppe_wait_busy(ppe); + } + +-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, +- struct mtk_foe_accounting *diff) +-{ +- struct mtk_foe_accounting *acct; +- int size = sizeof(struct mtk_foe_accounting); +- u64 bytes, packets; +- +- if (!ppe->accounting) +- return NULL; +- +- if (mtk_mib_entry_read(ppe, index, &bytes, &packets)) +- return NULL; +- +- acct = ppe->acct_table + index * size; +- +- acct->bytes += bytes; +- acct->packets += packets; +- +- if (diff) { +- diff->bytes = bytes; +- diff->packets = packets; +- } +- +- return acct; +-} +- + struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index) + { + bool accounting = eth->soc->has_accounting; +--- a/drivers/net/ethernet/mediatek/mtk_ppe.h ++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h 
+@@ -304,6 +304,8 @@ struct mtk_flow_entry {
+ 	struct mtk_foe_entry data;
+ 	struct rhash_head node;
+ 	unsigned long cookie;
++	u64 prev_packets, prev_bytes;
++	u64 packets, bytes;
+ };
+ 
+ struct mtk_mib_entry {
+@@ -348,6 +350,7 @@ void mtk_ppe_deinit(struct mtk_eth *eth)
+ void mtk_ppe_start(struct mtk_ppe *ppe);
+ int mtk_ppe_stop(struct mtk_ppe *ppe);
+ int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
++struct mtk_foe_accounting *mtk_ppe_mib_entry_read(struct mtk_ppe *ppe, u16 index);
+ 
+ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+ 
+@@ -397,9 +400,8 @@ int mtk_foe_entry_set_queue(struct mtk_e
+ 			    unsigned int queue);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+-int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+-struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+-						 struct mtk_foe_accounting *diff);
++void mtk_foe_entry_get_stats(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
++			     int *idle);
+ 
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+@@ -96,7 +96,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file
+ 		if (bind && state != MTK_FOE_STATE_BIND)
+ 			continue;
+ 
+-		acct = mtk_foe_entry_get_mib(ppe, i, NULL);
++		acct = mtk_ppe_mib_entry_read(ppe, i);
+ 
+ 		type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
+ 		seq_printf(m, "%05x %s %7s", i,
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -516,24 +516,21 @@ static int
+ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+ {
+ 	struct mtk_flow_entry *entry;
+-	struct mtk_foe_accounting diff;
+-	u32 idle;
++	u64 packets, bytes;
++	int idle;
+ 
+ 	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ 				  mtk_flow_ht_params);
+ 	if (!entry)
+ 		return -ENOENT;
+ 
+-	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
++	packets = entry->packets;
++	bytes = entry->bytes;
++	mtk_foe_entry_get_stats(eth->ppe[entry->ppe_index], entry, &idle);
++	f->stats.pkts += entry->packets - packets;
++	f->stats.bytes += entry->bytes - bytes;
+ 	f->stats.lastused = jiffies - idle * HZ;
+ 
+-	if (entry->hash != 0xFFFF &&
+-	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
+-				  &diff)) {
+-		f->stats.pkts += diff.packets;
+-		f->stats.bytes += diff.bytes;
+-	}
+-
+ 	return 0;
+ }
+ 
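
For readers following the logic of patch 736-03 above: an L2 flow is a parent
entry whose hardware state lives entirely in its subflows, so the parent is
considered as recently used as its most recently used subflow. Below is a
minimal, self-contained C sketch of the aggregation performed by
mtk_flow_entry_update_l2(); it uses a plain singly-linked list in place of the
kernel's hlist, and all names are illustrative, not the driver's.

    #include <stdio.h>

    /* Simplified stand-ins for mtk_flow_entry; all names are illustrative. */
    struct subflow {
    	struct subflow *next;
    	unsigned int last_used;		/* timestamp of last packet, seconds */
    };

    struct l2_flow {
    	struct subflow *subflows;	/* models entry->l2_flows */
    	unsigned int last_used;		/* models the ib1 timestamp field */
    };

    /* Models mtk_flow_entry_update_l2(): walk the subflows, keep the
     * smallest idle time seen, and refresh the parent's timestamp from
     * the most recently used subflow. */
    static unsigned int l2_flow_idle(struct l2_flow *f, unsigned int now)
    {
    	unsigned int idle = now - f->last_used;
    	struct subflow *cur;

    	for (cur = f->subflows; cur; cur = cur->next) {
    		unsigned int cur_idle = now - cur->last_used;

    		if (cur_idle >= idle)
    			continue;

    		idle = cur_idle;
    		f->last_used = cur->last_used;
    	}

    	return idle;
    }

    int main(void)
    {
    	struct subflow s1 = { NULL, 90 };
    	struct subflow s2 = { &s1, 95 };
    	struct l2_flow f = { &s2, 80 };

    	/* The parent was last refreshed at t=80, but one subflow saw
    	 * traffic at t=95, so at t=100 the flow is only 5s idle. */
    	printf("idle = %u\n", l2_flow_idle(&f, 100));
    	return 0;
    }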
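
The accounting fix in patch 736-04 rests on the same structure: hardware MIB
counters are zeroed when an entry enters bind, and when a subflow unbinds or
stops matching, its last reading is folded into prev_packets/prev_bytes so the
history outlives the subflow. A reported total is then always accumulated
history plus the live counters of the still-bound subflows. The following
self-contained sketch models that bookkeeping; the names and the array-free
MIB stand-in are assumptions for illustration, not the driver's layout.

    #include <stdio.h>
    #include <stdint.h>

    struct hw_counters {			/* stand-in for one PPE MIB entry */
    	uint64_t packets, bytes;
    };

    struct subflow {
    	struct subflow *next;
    	struct hw_counters hw;		/* live hardware counters */
    	int bound;
    };

    struct l2_flow {
    	struct subflow *subflows;
    	uint64_t prev_packets, prev_bytes;	/* history of expired subflows */
    };

    /* Models the unbind path: fold the final hardware reading into the
     * parent's history before the subflow entry would be freed. */
    static void subflow_expire(struct l2_flow *parent, struct subflow *f)
    {
    	parent->prev_packets += f->hw.packets;
    	parent->prev_bytes += f->hw.bytes;
    	f->bound = 0;
    }

    /* Models mtk_foe_entry_get_stats() for an L2 flow: history plus the
     * live counters of every subflow that is still bound. */
    static void l2_flow_get_stats(struct l2_flow *parent,
    			      uint64_t *packets, uint64_t *bytes)
    {
    	struct subflow *cur;

    	*packets = parent->prev_packets;
    	*bytes = parent->prev_bytes;
    	for (cur = parent->subflows; cur; cur = cur->next) {
    		if (!cur->bound)
    			continue;
    		*packets += cur->hw.packets;
    		*bytes += cur->hw.bytes;
    	}
    }

    int main(void)
    {
    	struct subflow s1 = { NULL, { 5, 500 }, 1 };
    	struct subflow s2 = { &s1, { 10, 1000 }, 1 };
    	struct l2_flow f = { &s2, 0, 0 };
    	uint64_t packets, bytes;

    	subflow_expire(&f, &s1);	/* s1's 5/500 move into the history */
    	l2_flow_get_stats(&f, &packets, &bytes);
    	printf("packets=%llu bytes=%llu\n",	/* prints 15/1500 */
    	       (unsigned long long)packets, (unsigned long long)bytes);
    	return 0;
    }

This mirrors why mtk_flow_offload_stats() in the patch reports only the delta
of entry->packets/entry->bytes across a call: the totals are monotonic sums of
history plus live readings, while flow_cls_offload stats want increments.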
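
Lastly, the reworked mtk_ppe_mib_entry_read() composes wide counters from
32-bit register reads: full 64-bit fields on NETSYS v3 and later, and a 48-bit
byte / 40-bit packet counter split across three registers on older hardware.
Here is a sketch of the pre-v3 composition; the field positions below are
assumptions chosen to match the shifts in the patch, not authoritative
register definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed layout (illustrative): R0 holds the low 32 bits of the byte
     * counter; R1 holds the high 16 byte-counter bits in its low half and
     * the low 16 packet-counter bits in its high half; R2 holds the
     * remaining 24 packet-counter bits. */
    static uint64_t mib_bytes(uint32_t r0, uint32_t r1)
    {
    	uint64_t low = r0;
    	uint64_t high = r1 & 0xffff;

    	return (high << 32) | low;	/* 48-bit byte counter */
    }

    static uint64_t mib_packets(uint32_t r1, uint32_t r2)
    {
    	uint64_t low = r1 >> 16;
    	uint64_t high = r2 & 0xffffff;

    	return (high << 16) | low;	/* 40-bit packet counter */
    }

    int main(void)
    {
    	/* 0x0002deadbeef bytes, 0x030005 packets */
    	uint32_t r0 = 0xdeadbeef, r1 = 0x00050002, r2 = 0x00000003;

    	printf("bytes=0x%llx packets=0x%llx\n",
    	       (unsigned long long)mib_bytes(r0, r1),
    	       (unsigned long long)mib_packets(r1, r2));
    	return 0;
    }

The switch from readl_poll_timeout() to readl_poll_timeout_atomic() in the
same hunk follows from this call now being made while ppe_lock is held, where
sleeping polls are not allowed.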