--- /dev/null
+From 54d989d58d2ac87c8504c2306ba8b4957c60e8dc Mon Sep 17 00:00:00 2001
+Date: Tue, 4 Mar 2025 15:21:08 +0100
+Subject: [PATCH 1/6] net: airoha: Move min/max packet len configuration in
+ airoha_dev_open()
+
+In order to align the maximum allowed packet size to the configured MTU,
+move the REG_GDM_LEN_CFG configuration into the airoha_dev_open routine.
+
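+With this change the long-frame limit tracks the configured MTU. As a
+worked example, with the default 1500 byte MTU the value programmed in
+GDM_LONG_LEN_MASK becomes:
+
+	len = ETH_HLEN + dev->mtu + ETH_FCS_LEN = 14 + 1500 + 4 = 1518
+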
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -138,15 +138,10 @@ static void airoha_fe_maccr_init(struct
+ {
+ int p;
+
+- for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
++ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++)
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
+ GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
+ GDM_DROP_CRC_ERR);
+- airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
+- GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+- FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+- FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
+- }
+
+ airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
+ FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+@@ -1521,9 +1516,9 @@ static void airoha_update_hw_stats(struc
+
+ static int airoha_dev_open(struct net_device *dev)
+ {
++ int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN;
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_qdma *qdma = port->qdma;
+- int err;
+
+ netif_tx_start_all_queues(dev);
+ err = airoha_set_vip_for_gdm_port(port, true);
+@@ -1537,6 +1532,11 @@ static int airoha_dev_open(struct net_de
+ airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
++ airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
++ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
++ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
++ FIELD_PREP(GDM_LONG_LEN_MASK, len));
++
+ airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_TX_DMA_EN_MASK |
+ GLOBAL_CFG_RX_DMA_EN_MASK);
--- /dev/null
+From e12182ddb6e712951d21a50e2c8ccd700e41a40c Mon Sep 17 00:00:00 2001
+Date: Tue, 4 Mar 2025 15:21:09 +0100
+Subject: [PATCH 2/6] net: airoha: Enable Rx Scatter-Gather
+
+The EN7581 SoC can receive up to 9k frames. Enable the reception of
+Scatter-Gather (SG) frames.
+
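+The per-queue q->skb pointer tracks the frame being assembled across
+multiple descriptors. A simplified sketch of the receive logic added by
+this patch:
+
+	if (!q->skb) {		/* first buffer */
+		q->skb = napi_build_skb(e->buf, q->buf_size);
+		__skb_put(q->skb, len);
+	} else {		/* scattered frame */
+		skb_add_rx_frag(q->skb, nr_frags, page,
+				e->buf - page_address(page), len,
+				q->buf_size);
+	}
+	if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
+		continue;	/* more buffers follow for this frame */
+	napi_gro_receive(&q->napi, q->skb);
+	q->skb = NULL;
+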
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 68 ++++++++++++++---------
+ drivers/net/ethernet/airoha/airoha_eth.h | 1 +
+ drivers/net/ethernet/airoha/airoha_regs.h | 5 ++
+ 3 files changed, 48 insertions(+), 26 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -615,10 +615,10 @@ static int airoha_qdma_rx_process(struct
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+ dma_addr_t dma_addr = le32_to_cpu(desc->addr);
++ struct page *page = virt_to_head_page(e->buf);
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct airoha_gdm_port *port;
+- struct sk_buff *skb;
+- int len, p;
++ int data_len, len, p;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+ break;
+@@ -636,30 +636,41 @@ static int airoha_qdma_rx_process(struct
+ dma_sync_single_for_cpu(eth->dev, dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size), dir);
+
++ data_len = q->skb ? q->buf_size
++ : SKB_WITH_OVERHEAD(q->buf_size);
++ if (data_len < len)
++ goto free_frag;
++
+ p = airoha_qdma_get_gdm_port(eth, desc);
+- if (p < 0 || !eth->ports[p]) {
+- page_pool_put_full_page(q->page_pool,
+- virt_to_head_page(e->buf),
+- true);
+- continue;
+- }
++ if (p < 0 || !eth->ports[p])
++ goto free_frag;
+
+ port = eth->ports[p];
+- skb = napi_build_skb(e->buf, q->buf_size);
+- if (!skb) {
+- page_pool_put_full_page(q->page_pool,
+- virt_to_head_page(e->buf),
+- true);
+- break;
++ if (!q->skb) { /* first buffer */
++ q->skb = napi_build_skb(e->buf, q->buf_size);
++ if (!q->skb)
++ goto free_frag;
++
++ __skb_put(q->skb, len);
++ skb_mark_for_recycle(q->skb);
++ q->skb->dev = port->dev;
++ q->skb->protocol = eth_type_trans(q->skb, port->dev);
++ q->skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb_record_rx_queue(q->skb, qid);
++ } else { /* scattered frame */
++ struct skb_shared_info *shinfo = skb_shinfo(q->skb);
++ int nr_frags = shinfo->nr_frags;
++
++ if (nr_frags >= ARRAY_SIZE(shinfo->frags))
++ goto free_frag;
++
++ skb_add_rx_frag(q->skb, nr_frags, page,
++ e->buf - page_address(page), len,
++ q->buf_size);
+ }
+
+- skb_reserve(skb, 2);
+- __skb_put(skb, len);
+- skb_mark_for_recycle(skb);
+- skb->dev = port->dev;
+- skb->protocol = eth_type_trans(skb, skb->dev);
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- skb_record_rx_queue(skb, qid);
++ if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
++ continue;
+
+ if (netdev_uses_dsa(port->dev)) {
+ /* PPE module requires untagged packets to work
+@@ -672,22 +683,27 @@ static int airoha_qdma_rx_process(struct
+
+ if (sptag < ARRAY_SIZE(port->dsa_meta) &&
+ port->dsa_meta[sptag])
+- skb_dst_set_noref(skb,
++ skb_dst_set_noref(q->skb,
+ &port->dsa_meta[sptag]->dst);
+ }
+
+ hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+ if (hash != AIROHA_RXD4_FOE_ENTRY)
+- skb_set_hash(skb, jhash_1word(hash, 0),
++ skb_set_hash(q->skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ airoha_ppe_check_skb(eth->ppe, hash);
+
+- napi_gro_receive(&q->napi, skb);
+-
+ done++;
++ napi_gro_receive(&q->napi, q->skb);
++ q->skb = NULL;
++ continue;
++free_frag:
++ page_pool_put_full_page(q->page_pool, page, true);
++ dev_kfree_skb(q->skb);
++ q->skb = NULL;
+ }
+ airoha_qdma_fill_rx_queue(q);
+
+@@ -763,6 +779,7 @@ static int airoha_qdma_init_rx_queue(str
+ FIELD_PREP(RX_RING_THR_MASK, thr));
+ airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
++ airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK);
+
+ airoha_qdma_fill_rx_queue(q);
+
+@@ -1162,7 +1179,6 @@ static int airoha_qdma_hw_init(struct ai
+ }
+
+ airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
+- GLOBAL_CFG_RX_2B_OFFSET_MASK |
+ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -176,6 +176,7 @@ struct airoha_queue {
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
++ struct sk_buff *skb;
+ };
+
+ struct airoha_tx_irq_queue {
+--- a/drivers/net/ethernet/airoha/airoha_regs.h
++++ b/drivers/net/ethernet/airoha/airoha_regs.h
+@@ -626,10 +626,15 @@
+ #define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
++#define REG_RX_SCATTER_CFG(_n) \
++ (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5))
++
+ #define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+ #define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
++#define RX_RING_SG_EN_MASK BIT(0)
++
+ #define REG_INGRESS_TRTCM_CFG 0x0070
+ #define INGRESS_TRTCM_EN_MASK BIT(31)
+ #define INGRESS_TRTCM_MODE_MASK BIT(30)
--- /dev/null
+From 03b1b69f0662c46f258a45e4a7d7837351c11692 Mon Sep 17 00:00:00 2001
+Date: Tue, 4 Mar 2025 15:21:10 +0100
+Subject: [PATCH 3/6] net: airoha: Introduce airoha_dev_change_mtu callback
+
+Add the airoha_dev_change_mtu callback in order to update the MTU of a
+running device.
+
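+E.g. the MTU of a running interface can then be updated with:
+
+$ip link set dev eth1 mtu 2000
+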
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -1706,6 +1706,20 @@ static void airoha_dev_get_stats64(struc
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+ }
+
++static int airoha_dev_change_mtu(struct net_device *dev, int mtu)
++{
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct airoha_eth *eth = port->qdma->eth;
++ u32 len = ETH_HLEN + mtu + ETH_FCS_LEN;
++
++ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id),
++ GDM_LONG_LEN_MASK,
++ FIELD_PREP(GDM_LONG_LEN_MASK, len));
++ WRITE_ONCE(dev->mtu, mtu);
++
++ return 0;
++}
++
+ static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+ {
+@@ -2398,6 +2412,7 @@ static const struct net_device_ops airoh
+ .ndo_init = airoha_dev_init,
+ .ndo_open = airoha_dev_open,
+ .ndo_stop = airoha_dev_stop,
++ .ndo_change_mtu = airoha_dev_change_mtu,
+ .ndo_select_queue = airoha_dev_select_queue,
+ .ndo_start_xmit = airoha_dev_xmit,
+ .ndo_get_stats64 = airoha_dev_get_stats64,
--- /dev/null
+From 168ef0c1dee83c401896a0bca680e9f97b1ebd64 Mon Sep 17 00:00:00 2001
+Date: Tue, 4 Mar 2025 15:21:11 +0100
+Subject: [PATCH 4/6] net: airoha: Increase max mtu to 9k
+
+The EN7581 SoC supports a maximum MTU of 9k.
+
+---
+ drivers/net/ethernet/airoha/airoha_eth.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -20,7 +20,7 @@
+ #define AIROHA_MAX_DSA_PORTS 7
+ #define AIROHA_MAX_NUM_RSTS 3
+ #define AIROHA_MAX_NUM_XSI_RSTS 5
+-#define AIROHA_MAX_MTU 2000
++#define AIROHA_MAX_MTU 9216
+ #define AIROHA_MAX_PACKET_SIZE 2048
+ #define AIROHA_NUM_QOS_CHANNELS 4
+ #define AIROHA_NUM_QOS_QUEUES 8
--- /dev/null
+From 35ea4f06fd33fc32f556a0c26d1d8340497fa7f8 Mon Sep 17 00:00:00 2001
+Date: Tue, 4 Mar 2025 15:38:05 +0100
+Subject: [PATCH 5/6] net: airoha: Fix lan4 support in
+ airoha_qdma_get_gdm_port()
+
+The EN7581 SoC supports the lan1..lan4 ports of the MT7530 DSA switch.
+Fix the value reported for lan4 in the airoha_qdma_get_gdm_port routine.
+
+Fixes: 23020f0493270 ("net: airoha: Introduce ethernet support for EN7581 SoC")
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -589,7 +589,7 @@ static int airoha_qdma_get_gdm_port(stru
+
+ sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
+ switch (sport) {
+- case 0x10 ... 0x13:
++ case 0x10 ... 0x14:
+ port = 0;
+ break;
+ case 0x2 ... 0x4:
--- /dev/null
+From a202dfe31cae2f2120297a7142385d80a5577d42 Mon Sep 17 00:00:00 2001
+Date: Tue, 4 Mar 2025 16:46:40 +0100
+Subject: [PATCH 6/6] net: airoha: Enable TSO/Scatter Gather for LAN port
+
+Set the net_device vlan_features in order to enable TSO and
+Scatter-Gather for the DSA user ports.
+
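+After this change, TSO/SG should be reported as enabled on the DSA user
+ports too, e.g. (lan1 as a hypothetical user port name):
+
+$ethtool -k lan1 | grep -E 'scatter-gather:|tcp-segmentation-offload:'
+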
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -2503,6 +2503,7 @@ static int airoha_alloc_gdm_port(struct
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_HW_TC;
+ dev->features |= dev->hw_features;
++ dev->vlan_features = dev->hw_features;
+ dev->dev.of_node = np;
+ dev->irq = qdma->irq;
+ SET_NETDEV_DEV(dev, eth->dev);
--- /dev/null
+From e368d2a1e8b6f0926e4e76a56b484249905192f5 Mon Sep 17 00:00:00 2001
+Date: Thu, 6 Mar 2025 11:52:20 +0100
+Subject: [PATCH] net: airoha: Fix dev->dsa_ptr check in airoha_get_dsa_tag()
+
+Fix the following warning reported by Smatch static checker in
+airoha_get_dsa_tag routine:
+
+drivers/net/ethernet/airoha/airoha_eth.c:1722 airoha_get_dsa_tag()
+warn: 'dp' isn't an ERR_PTR
+
+dev->dsa_ptr can't be set to an error pointer; it can only be NULL.
+Remove this check since it is already performed in netdev_uses_dsa().
+
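+For reference, a simplified sketch of the netdev_uses_dsa() helper from
+include/net/dsa.h, which already dereferences dev->dsa_ptr:
+
+	static inline bool netdev_uses_dsa(const struct net_device *dev)
+	{
+	#if IS_ENABLED(CONFIG_NET_DSA)
+		return dev->dsa_ptr && dev->dsa_ptr->rcv;
+	#endif
+		return false;
+	}
+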
+Closes: https://lore.kernel.org/netdev/Z8l3E0lGOcrel07C@lore-desk/T/#m54adc113fcdd8c5e6c5f65ffd60d8e8b1d483d90
+Fixes: af3cf757d5c9 ("net: airoha: Move DSA tag in DMA descriptor")
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -1742,18 +1742,13 @@ static u32 airoha_get_dsa_tag(struct sk_
+ {
+ #if IS_ENABLED(CONFIG_NET_DSA)
+ struct ethhdr *ehdr;
+- struct dsa_port *dp;
+ u8 xmit_tpid;
+ u16 tag;
+
+ if (!netdev_uses_dsa(dev))
+ return 0;
+
+- dp = dev->dsa_ptr;
+- if (IS_ERR(dp))
+- return 0;
+-
+- if (dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
++ if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return 0;
+
+ if (skb_cow_head(skb, 0))
--- /dev/null
+From 08d0185e36ad8bb5902a73711bf114765d282161 Mon Sep 17 00:00:00 2001
+Date: Fri, 14 Mar 2025 16:49:59 +0100
+Subject: [PATCH] net: airoha: fix CONFIG_DEBUG_FS check
+
+The #if check causes a build failure when CONFIG_DEBUG_FS is turned
+off:
+
+In file included from drivers/net/ethernet/airoha/airoha_eth.c:17:
+drivers/net/ethernet/airoha/airoha_eth.h:543:5: error: "CONFIG_DEBUG_FS" is not defined, evaluates to 0 [-Werror=undef]
+ 543 | #if CONFIG_DEBUG_FS
+ | ^~~~~~~~~~~~~~~
+
+Replace it with the correct #ifdef.
+
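+Unlike "#if CONFIG_DEBUG_FS", which requires the macro to be defined
+with a value, "#ifdef CONFIG_DEBUG_FS" only tests for its presence, so
+it is valid regardless of whether the option is enabled:
+
+	#ifdef CONFIG_DEBUG_FS	/* or: #if defined(CONFIG_DEBUG_FS) */
+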
+Fixes: 3fe15c640f38 ("net: airoha: Introduce PPE debugfs support")
+---
+ drivers/net/ethernet/airoha/airoha_eth.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -540,7 +540,7 @@ void airoha_ppe_deinit(struct airoha_eth
+ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+
+-#if CONFIG_DEBUG_FS
++#ifdef CONFIG_DEBUG_FS
+ int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+ #else
+ static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
--- /dev/null
+From 57b290d97c6150774bf929117ca737a26d8fc33d Mon Sep 17 00:00:00 2001
+Date: Mon, 31 Mar 2025 08:52:53 +0200
+Subject: [PATCH 1/2] net: airoha: Fix qid report in
+ airoha_tc_get_htb_get_leaf_queue()
+
+Fix the following kernel warning, triggered when deleting HTB offloaded
+leaves and/or the root HTB qdisc in the airoha_eth driver, by properly
+reporting the qid in the airoha_tc_get_htb_get_leaf_queue routine.
+
+$tc qdisc replace dev eth1 root handle 10: htb offload
+$tc class add dev eth1 parent 10: classid 10:4 htb rate 100mbit ceil 100mbit
+$tc qdisc replace dev eth1 parent 10:4 handle 4: ets bands 8 \
+ quanta 1514 3028 4542 6056 7570 9084 10598 12112
+$tc qdisc del dev eth1 root
+
+[ 55.827864] ------------[ cut here ]------------
+[ 55.832493] WARNING: CPU: 3 PID: 2678 at 0xffffffc0798695a4
+[ 55.956510] CPU: 3 PID: 2678 Comm: tc Tainted: G O 6.6.71 #0
+[ 55.963557] Hardware name: Airoha AN7581 Evaluation Board (DT)
+[ 55.969383] pstate: 20400005 (nzCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+[ 55.976344] pc : 0xffffffc0798695a4
+[ 55.979851] lr : 0xffffffc079869a20
+[ 55.983358] sp : ffffffc0850536a0
+[ 55.986665] x29: ffffffc0850536a0 x28: 0000000000000024 x27: 0000000000000001
+[ 55.993800] x26: 0000000000000000 x25: ffffff8008b19000 x24: ffffff800222e800
+[ 56.000935] x23: 0000000000000001 x22: 0000000000000000 x21: ffffff8008b19000
+[ 56.008071] x20: ffffff8002225800 x19: ffffff800379d000 x18: 0000000000000000
+[ 56.015206] x17: ffffffbf9ea59000 x16: ffffffc080018000 x15: 0000000000000000
+[ 56.022342] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000001
+[ 56.029478] x11: ffffffc081471008 x10: ffffffc081575a98 x9 : 0000000000000000
+[ 56.036614] x8 : ffffffc08167fd40 x7 : ffffffc08069e104 x6 : ffffff8007f86000
+[ 56.043748] x5 : 0000000000000000 x4 : 0000000000000000 x3 : 0000000000000001
+[ 56.050884] x2 : 0000000000000000 x1 : 0000000000000250 x0 : ffffff800222c000
+[ 56.058020] Call trace:
+[ 56.060459] 0xffffffc0798695a4
+[ 56.063618] 0xffffffc079869a20
+[ 56.066777] __qdisc_destroy+0x40/0xa0
+[ 56.070528] qdisc_put+0x54/0x6c
+[ 56.073748] qdisc_graft+0x41c/0x648
+[ 56.077324] tc_get_qdisc+0x168/0x2f8
+[ 56.080978] rtnetlink_rcv_msg+0x230/0x330
+[ 56.085076] netlink_rcv_skb+0x5c/0x128
+[ 56.088913] rtnetlink_rcv+0x14/0x1c
+[ 56.092490] netlink_unicast+0x1e0/0x2c8
+[ 56.096413] netlink_sendmsg+0x198/0x3c8
+[ 56.100337] ____sys_sendmsg+0x1c4/0x274
+[ 56.104261] ___sys_sendmsg+0x7c/0xc0
+[ 56.107924] __sys_sendmsg+0x44/0x98
+[ 56.111492] __arm64_sys_sendmsg+0x20/0x28
+[ 56.115580] invoke_syscall.constprop.0+0x58/0xfc
+[ 56.120285] do_el0_svc+0x3c/0xbc
+[ 56.123592] el0_svc+0x18/0x4c
+[ 56.126647] el0t_64_sync_handler+0x118/0x124
+[ 56.131005] el0t_64_sync+0x150/0x154
+[ 56.134660] ---[ end trace 0000000000000000 ]---
+
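+The HTB leaf hw queues come after the AIROHA_NUM_TX_RING Tx rings (the
+driver reserves hw queues for HTB offloading at port allocation time),
+so the qid reported to the stack must be offset by AIROHA_NUM_TX_RING,
+which is what the one-line fix below does.
+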
+Fixes: ef1ca9271313b ("net: airoha: Add sched HTB offload support")
+Link: https://patch.msgid.link/20250331-airoha-htb-qdisc-offload-del-fix-v1-1-4ea429c2c968@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -2356,7 +2356,7 @@ static int airoha_tc_get_htb_get_leaf_qu
+ return -EINVAL;
+ }
+
+- opt->qid = channel;
++ opt->qid = AIROHA_NUM_TX_RING + channel;
+
+ return 0;
+ }
--- /dev/null
+From 367579274f60cb23c570ae5348966ab51e1509a4 Mon Sep 17 00:00:00 2001
+Date: Mon, 31 Mar 2025 18:17:31 +0200
+Subject: [PATCH 2/2] net: airoha: Fix ETS priomap validation
+
+The ETS Qdisc schedules SP bands in a priority order that assigns band-0
+the highest priority (band-0 > band-1 > .. > band-n), while the EN7581
+SoC arranges SP bands in the opposite order, assigning band-7 the
+highest priority (band-7 > band-6 > .. > band-0).
+Fix the priomap check in the airoha_qdma_set_tx_ets_sched routine in
+order to align the ETS Qdisc and airoha_eth driver SP priority ordering.
+
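+As a worked example derived from the new checks: with 8 bands and
+nstrict == 2, a valid priomap must have priomap[7] == 0, priomap[6] == 1
+(the SP bands) and priomap[i] == 2 + i for i in [0-5] (the WRR bands).
+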
+Fixes: b56e4d660a96 ("net: airoha: Enforce ETS Qdisc priomap")
+Link: https://patch.msgid.link/20250331-airoha-ets-validate-priomap-v1-1-60a524488672@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -2029,7 +2029,7 @@ static int airoha_qdma_set_tx_ets_sched(
+ struct tc_ets_qopt_offload_replace_params *p = &opt->replace_params;
+ enum tx_sched_mode mode = TC_SCH_SP;
+ u16 w[AIROHA_NUM_QOS_QUEUES] = {};
+- int i, nstrict = 0, nwrr, qidx;
++ int i, nstrict = 0;
+
+ if (p->bands > AIROHA_NUM_QOS_QUEUES)
+ return -EINVAL;
+@@ -2047,17 +2047,17 @@ static int airoha_qdma_set_tx_ets_sched(
+ * lowest priorities with respect to SP ones.
+ * e.g: WRR0, WRR1, .., WRRm, SP0, SP1, .., SPn
+ */
+- nwrr = p->bands - nstrict;
+- qidx = nstrict && nwrr ? nstrict : 0;
+- for (i = 1; i <= p->bands; i++) {
+- if (p->priomap[i % AIROHA_NUM_QOS_QUEUES] != qidx)
++ for (i = 0; i < nstrict; i++) {
++ if (p->priomap[p->bands - i - 1] != i)
+ return -EINVAL;
+-
+- qidx = i == nwrr ? 0 : qidx + 1;
+ }
+
+- for (i = 0; i < nwrr; i++)
++ for (i = 0; i < p->bands - nstrict; i++) {
++ if (p->priomap[i] != nstrict + i)
++ return -EINVAL;
++
+ w[i] = p->weights[nstrict + i];
++ }
+
+ if (!nstrict)
+ mode = TC_SCH_WRR8;
--- /dev/null
+From 09bccf56db36501ccb1935d921dc24451e9f57dd Mon Sep 17 00:00:00 2001
+Date: Tue, 1 Apr 2025 11:42:30 +0200
+Subject: [PATCH] net: airoha: Validate egress gdm port in
+ airoha_ppe_foe_entry_prepare()
+
+The dev pointer in the airoha_ppe_foe_entry_prepare routine is not
+necessarily a device allocated by the airoha_eth driver, since it is an
+egress device and the flowtable can also contain wlan, pppoe or vlan
+devices. E.g:
+
+flowtable ft {
+ hook ingress priority filter
+ devices = { eth1, lan1, lan2, lan3, lan4, wlan0 }
+ flags offload ^
+ |
+ "not allocated by airoha_eth" --
+}
+
+In this case airoha_get_dsa_port() will just return the original device
+pointer and we can't assume the netdev priv pointer points to an
+airoha_gdm_port struct.
+Fix the issue by validating the egress gdm port in the
+airoha_ppe_foe_entry_prepare routine before accessing the net_device
+priv pointer.
+
+Fixes: 00a7678310fe ("net: airoha: Introduce flowtable offload support")
+Link: https://patch.msgid.link/20250401-airoha-validate-egress-gdm-port-v4-1-c7315d33ce10@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 13 +++++++++++++
+ drivers/net/ethernet/airoha/airoha_eth.h | 3 +++
+ drivers/net/ethernet/airoha/airoha_ppe.c | 8 ++++++--
+ 3 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -2452,6 +2452,19 @@ static void airoha_metadata_dst_free(str
+ }
+ }
+
++bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
++ struct airoha_gdm_port *port)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
++ if (eth->ports[i] == port)
++ return true;
++ }
++
++ return false;
++}
++
+ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
+ struct device_node *np, int index)
+ {
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -532,6 +532,9 @@ u32 airoha_rmw(void __iomem *base, u32 o
+ #define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
++bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
++ struct airoha_gdm_port *port);
++
+ void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
+ int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -197,7 +197,8 @@ static int airoha_get_dsa_port(struct ne
+ #endif
+ }
+
+-static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe,
++static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
++ struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+ struct airoha_flow_data *data,
+ int l4proto)
+@@ -225,6 +226,9 @@ static int airoha_ppe_foe_entry_prepare(
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u8 pse_port;
+
++ if (!airoha_is_valid_gdm_port(eth, port))
++ return -EINVAL;
++
+ if (dsa_port >= 0)
+ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ else
+@@ -633,7 +637,7 @@ static int airoha_ppe_flow_offload_repla
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+- err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type,
++ err = airoha_ppe_foe_entry_prepare(eth, &hwe, odev, offload_type,
+ &data, l4proto);
+ if (err)
+ return err;
--- /dev/null
+From b4916f67902e2ae1dc8e37dfa45e8894ad2f8921 Mon Sep 17 00:00:00 2001
+Date: Wed, 9 Apr 2025 11:47:14 +0200
+Subject: [PATCH 1/2] net: airoha: Add l2_flows rhashtable
+
+Introduce the l2_flows rhashtable in the airoha_ppe struct in order to
+store the L2 flows committed by the upper layers of the kernel. This is
+a preliminary patch to offload L2 traffic rules.
+
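+The rhashtable key is the data.bridge field, i.e. the destination and
+source MAC addresses (key_len is 2 * ETH_ALEN). A minimal lookup sketch,
+assuming a local airoha_foe_bridge key populated from the packet
+ethernet header:
+
+	e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+				   airoha_l2_flow_table_params);
+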
+---
+ drivers/net/ethernet/airoha/airoha_eth.h | 15 +++-
+ drivers/net/ethernet/airoha/airoha_ppe.c | 103 ++++++++++++++++++-----
+ 2 files changed, 98 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -422,12 +422,23 @@ struct airoha_flow_data {
+ } pppoe;
+ };
+
++enum airoha_flow_entry_type {
++ FLOW_TYPE_L4,
++ FLOW_TYPE_L2,
++ FLOW_TYPE_L2_SUBFLOW,
++};
++
+ struct airoha_flow_table_entry {
+- struct hlist_node list;
++ union {
++ struct hlist_node list; /* PPE L3 flow entry */
++ struct rhash_head l2_node; /* L2 flow entry */
++ };
+
+ struct airoha_foe_entry data;
+ u32 hash;
+
++ enum airoha_flow_entry_type type;
++
+ struct rhash_head node;
+ unsigned long cookie;
+ };
+@@ -480,6 +491,8 @@ struct airoha_ppe {
+ void *foe;
+ dma_addr_t foe_dma;
+
++ struct rhashtable l2_flows;
++
+ struct hlist_head *foe_flow;
+ u16 foe_check_time[PPE_NUM_ENTRIES];
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -24,6 +24,13 @@ static const struct rhashtable_params ai
+ .automatic_shrinking = true,
+ };
+
++static const struct rhashtable_params airoha_l2_flow_table_params = {
++ .head_offset = offsetof(struct airoha_flow_table_entry, l2_node),
++ .key_offset = offsetof(struct airoha_flow_table_entry, data.bridge),
++ .key_len = 2 * ETH_ALEN,
++ .automatic_shrinking = true,
++};
++
+ static bool airoha_ppe2_is_enabled(struct airoha_eth *eth)
+ {
+ return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK;
+@@ -476,6 +483,43 @@ static int airoha_ppe_foe_commit_entry(s
+ return 0;
+ }
+
++static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ lockdep_assert_held(&ppe_lock);
++
++ hlist_del_init(&e->list);
++ if (e->hash != 0xffff) {
++ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
++ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
++ AIROHA_FOE_STATE_INVALID);
++ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
++ e->hash = 0xffff;
++ }
++}
++
++static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ lockdep_assert_held(&ppe_lock);
++
++ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
++ airoha_l2_flow_table_params);
++}
++
++static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ spin_lock_bh(&ppe_lock);
++
++ if (e->type == FLOW_TYPE_L2)
++ airoha_ppe_foe_remove_l2_flow(ppe, e);
++ else
++ airoha_ppe_foe_remove_flow(ppe, e);
++
++ spin_unlock_bh(&ppe_lock);
++}
++
+ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
+ {
+ struct airoha_flow_table_entry *e;
+@@ -505,11 +549,37 @@ unlock:
+ spin_unlock_bh(&ppe_lock);
+ }
+
++static int
++airoha_ppe_foe_l2_flow_commit_entry(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ struct airoha_flow_table_entry *prev;
++
++ e->type = FLOW_TYPE_L2;
++ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &e->l2_node,
++ airoha_l2_flow_table_params);
++ if (!prev)
++ return 0;
++
++ if (IS_ERR(prev))
++ return PTR_ERR(prev);
++
++ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
++ &e->l2_node,
++ airoha_l2_flow_table_params);
++}
++
+ static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+ {
+- u32 hash = airoha_ppe_foe_get_entry_hash(&e->data);
++ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1);
++ u32 hash;
+
++ if (type == PPE_PKT_TYPE_BRIDGE)
++ return airoha_ppe_foe_l2_flow_commit_entry(ppe, e);
++
++ hash = airoha_ppe_foe_get_entry_hash(&e->data);
++ e->type = FLOW_TYPE_L4;
+ e->hash = 0xffff;
+
+ spin_lock_bh(&ppe_lock);
+@@ -519,23 +589,6 @@ static int airoha_ppe_foe_flow_commit_en
+ return 0;
+ }
+
+-static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+- struct airoha_flow_table_entry *e)
+-{
+- spin_lock_bh(&ppe_lock);
+-
+- hlist_del_init(&e->list);
+- if (e->hash != 0xffff) {
+- e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+- e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+- AIROHA_FOE_STATE_INVALID);
+- airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+- e->hash = 0xffff;
+- }
+-
+- spin_unlock_bh(&ppe_lock);
+-}
+-
+ static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+ {
+@@ -890,9 +943,20 @@ int airoha_ppe_init(struct airoha_eth *e
+ if (err)
+ return err;
+
++ err = rhashtable_init(&ppe->l2_flows, &airoha_l2_flow_table_params);
++ if (err)
++ goto error_flow_table_destroy;
++
+ err = airoha_ppe_debugfs_init(ppe);
+ if (err)
+- rhashtable_destroy(&eth->flow_table);
++ goto error_l2_flow_table_destroy;
++
++ return 0;
++
++error_l2_flow_table_destroy:
++ rhashtable_destroy(&ppe->l2_flows);
++error_flow_table_destroy:
++ rhashtable_destroy(&eth->flow_table);
+
+ return err;
+ }
+@@ -909,6 +973,7 @@ void airoha_ppe_deinit(struct airoha_eth
+ }
+ rcu_read_unlock();
+
++ rhashtable_destroy(&eth->ppe->l2_flows);
+ rhashtable_destroy(&eth->flow_table);
+ debugfs_remove(eth->ppe->debugfs_dir);
+ }
--- /dev/null
+From cd53f622611f9a6dd83b858c85448dd3568b67ec Mon Sep 17 00:00:00 2001
+Date: Wed, 9 Apr 2025 11:47:15 +0200
+Subject: [PATCH 2/2] net: airoha: Add L2 hw acceleration support
+
+Similar to the mtk driver, introduce the capability to offload L2
+traffic by defining flower rules in the PSE/PPE engine available on the
+EN7581 SoC. Since the hw always reports L2/L3/L4 flower rules, link all
+L2 rules sharing the same L2 info (but different L3/L4 info) into the
+L2 subflows list of a given L2 PPE entry.
+
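+L2 flows can then be offloaded e.g. via a nft flowtable spanning the DSA
+user ports (hypothetical configuration, mirroring the one used for
+L3/L4 offloading):
+
+flowtable ft {
+	hook ingress priority filter
+	devices = { lan1, lan2, lan3, lan4 }
+	flags offload
+}
+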
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 2 +-
+ drivers/net/ethernet/airoha/airoha_eth.h | 9 +-
+ drivers/net/ethernet/airoha/airoha_ppe.c | 121 ++++++++++++++++++++---
+ 3 files changed, 115 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -694,7 +694,7 @@ static int airoha_qdma_rx_process(struct
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+- airoha_ppe_check_skb(eth->ppe, hash);
++ airoha_ppe_check_skb(eth->ppe, q->skb, hash);
+
+ done++;
+ napi_gro_receive(&q->napi, q->skb);
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -431,10 +431,14 @@ enum airoha_flow_entry_type {
+ struct airoha_flow_table_entry {
+ union {
+ struct hlist_node list; /* PPE L3 flow entry */
+- struct rhash_head l2_node; /* L2 flow entry */
++ struct {
++ struct rhash_head l2_node; /* L2 flow entry */
++ struct hlist_head l2_flows; /* PPE L2 subflows list */
++ };
+ };
+
+ struct airoha_foe_entry data;
++ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
+ u32 hash;
+
+ enum airoha_flow_entry_type type;
+@@ -548,7 +552,8 @@ u32 airoha_rmw(void __iomem *base, u32 o
+ bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+-void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash);
++void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
++ u16 hash);
+ int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+ int airoha_ppe_init(struct airoha_eth *eth);
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -204,6 +204,15 @@ static int airoha_get_dsa_port(struct ne
+ #endif
+ }
+
++static void airoha_ppe_foe_set_bridge_addrs(struct airoha_foe_bridge *br,
++ struct ethhdr *eh)
++{
++ br->dest_mac_hi = get_unaligned_be32(eh->h_dest);
++ br->dest_mac_lo = get_unaligned_be16(eh->h_dest + 4);
++ br->src_mac_hi = get_unaligned_be16(eh->h_source);
++ br->src_mac_lo = get_unaligned_be32(eh->h_source + 2);
++}
++
+ static int airoha_ppe_foe_entry_prepare(struct airoha_eth *eth,
+ struct airoha_foe_entry *hwe,
+ struct net_device *dev, int type,
+@@ -254,13 +263,7 @@ static int airoha_ppe_foe_entry_prepare(
+
+ qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+- hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+- hwe->bridge.dest_mac_lo =
+- get_unaligned_be16(data->eth.h_dest + 4);
+- hwe->bridge.src_mac_hi =
+- get_unaligned_be16(data->eth.h_source);
+- hwe->bridge.src_mac_lo =
+- get_unaligned_be32(data->eth.h_source + 2);
++ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
+ hwe->bridge.data = qdata;
+ hwe->bridge.ib2 = val;
+ l2 = &hwe->bridge.l2.common;
+@@ -385,6 +388,19 @@ static u32 airoha_ppe_foe_get_entry_hash
+ hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1];
+ hv3 ^= hwe->ipv6.src_ip[0];
+ break;
++ case PPE_PKT_TYPE_BRIDGE: {
++ struct airoha_foe_mac_info *l2 = &hwe->bridge.l2;
++
++ hv1 = l2->common.src_mac_hi & 0xffff;
++ hv1 = hv1 << 16 | l2->src_mac_lo;
++
++ hv2 = l2->common.dest_mac_lo;
++ hv2 = hv2 << 16;
++ hv2 = hv2 | ((l2->common.src_mac_hi & 0xffff0000) >> 16);
++
++ hv3 = l2->common.dest_mac_hi;
++ break;
++ }
+ case PPE_PKT_TYPE_IPV4_DSLITE:
+ case PPE_PKT_TYPE_IPV6_6RD:
+ default:
+@@ -496,15 +512,24 @@ static void airoha_ppe_foe_remove_flow(s
+ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
+ e->hash = 0xffff;
+ }
++ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
++ hlist_del_init(&e->l2_subflow_node);
++ kfree(e);
++ }
+ }
+
+ static void airoha_ppe_foe_remove_l2_flow(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e)
+ {
++ struct hlist_head *head = &e->l2_flows;
++ struct hlist_node *n;
++
+ lockdep_assert_held(&ppe_lock);
+
+ rhashtable_remove_fast(&ppe->l2_flows, &e->l2_node,
+ airoha_l2_flow_table_params);
++ hlist_for_each_entry_safe(e, n, head, l2_subflow_node)
++ airoha_ppe_foe_remove_flow(ppe, e);
+ }
+
+ static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe,
+@@ -520,10 +545,56 @@ static void airoha_ppe_foe_flow_remove_e
+ spin_unlock_bh(&ppe_lock);
+ }
+
+-static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash)
++static int
++airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e,
++ u32 hash)
++{
++ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
++ struct airoha_foe_entry *hwe_p, hwe;
++ struct airoha_flow_table_entry *f;
++ struct airoha_foe_mac_info *l2;
++ int type;
++
++ hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
++ if (!hwe_p)
++ return -EINVAL;
++
++ f = kzalloc(sizeof(*f), GFP_ATOMIC);
++ if (!f)
++ return -ENOMEM;
++
++ hlist_add_head(&f->l2_subflow_node, &e->l2_flows);
++ f->type = FLOW_TYPE_L2_SUBFLOW;
++ f->hash = hash;
++
++ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
++ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
++ l2 = &hwe.bridge.l2;
++ memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
++
++ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
++ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
++ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
++ sizeof(hwe.ipv4.new_tuple));
++ else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
++ l2->common.etype == ETH_P_IP)
++ l2->common.etype = ETH_P_IPV6;
++
++ hwe.bridge.ib2 = e->data.bridge.ib2;
++ airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
++
++ return 0;
++}
++
++static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
++ struct sk_buff *skb,
++ u32 hash)
+ {
+ struct airoha_flow_table_entry *e;
++ struct airoha_foe_bridge br = {};
+ struct airoha_foe_entry *hwe;
++ bool commit_done = false;
+ struct hlist_node *n;
+ u32 index, state;
+
+@@ -539,12 +610,33 @@ static void airoha_ppe_foe_insert_entry(
+
+ index = airoha_ppe_foe_get_entry_hash(hwe);
+ hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) {
+- if (airoha_ppe_foe_compare_entry(e, hwe)) {
+- airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+- e->hash = hash;
+- break;
++ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
++ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1);
++ if (state != AIROHA_FOE_STATE_BIND) {
++ e->hash = 0xffff;
++ airoha_ppe_foe_remove_flow(ppe, e);
++ }
++ continue;
++ }
++
++ if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
++ e->hash = 0xffff;
++ continue;
+ }
++
++ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
++ commit_done = true;
++ e->hash = hash;
+ }
++
++ if (commit_done)
++ goto unlock;
++
++ airoha_ppe_foe_set_bridge_addrs(&br, eth_hdr(skb));
++ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
++ airoha_l2_flow_table_params);
++ if (e)
++ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
+ unlock:
+ spin_unlock_bh(&ppe_lock);
+ }
+@@ -899,7 +991,8 @@ int airoha_ppe_setup_tc_block_cb(enum tc
+ return err;
+ }
+
+-void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash)
++void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
++ u16 hash)
+ {
+ u16 now, diff;
+
+@@ -912,7 +1005,7 @@ void airoha_ppe_check_skb(struct airoha_
+ return;
+
+ ppe->foe_check_time[hash] = now;
+- airoha_ppe_foe_insert_entry(ppe, hash);
++ airoha_ppe_foe_insert_entry(ppe, skb, hash);
+ }
+
+ int airoha_ppe_init(struct airoha_eth *eth)
--- /dev/null
+From df8398fb7bb7a0e509200af56b79343aa133b7d6 Mon Sep 17 00:00:00 2001
+Date: Tue, 15 Apr 2025 09:14:34 +0200
+Subject: [PATCH] net: airoha: Add matchall filter offload support
+
+Introduce tc matchall filter offload support in the airoha_eth driver.
+The matchall hw filter is used to implement hw rate policing via the tc
+police action:
+
+$tc qdisc add dev eth0 handle ffff: ingress
+$tc filter add dev eth0 parent ffff: matchall action police \
+ rate 100mbit burst 1000k drop
+
+The current implementation supports just drop/accept as exceed/notexceed
+actions. Moreover, rate and burst are the only supported configuration
+parameters.
+
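+For byte-based policing the hw rate is programmed in Kbit/s; as a worked
+example, the 100mbit rate above is converted as done in
+airoha_dev_tc_matchall:
+
+	rate = div_u64(rate_bytes_ps, 1000) << 3
+	     = (12500000 / 1000) << 3 = 100000 Kbit/s
+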
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 273 +++++++++++++++++++++-
+ drivers/net/ethernet/airoha/airoha_eth.h | 8 +-
+ drivers/net/ethernet/airoha/airoha_ppe.c | 9 +-
+ drivers/net/ethernet/airoha/airoha_regs.h | 7 +
+ 4 files changed, 286 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -527,6 +527,25 @@ static int airoha_fe_init(struct airoha_
+ /* disable IFC by default */
+ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+
++ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0),
++ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM1) |
++ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM1));
++ airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(1),
++ FIELD_PREP(DFT_CPORT_MASK(7), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(6), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(5), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(4), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(3), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(2), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(1), FE_PSE_PORT_CDM2) |
++ FIELD_PREP(DFT_CPORT_MASK(0), FE_PSE_PORT_CDM2));
++
+ /* enable 1:N vlan action, init vlan table */
+ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+
+@@ -1632,7 +1651,6 @@ static void airhoha_set_gdm2_loopback(st
+
+ if (port->id == 3) {
+ /* FIXME: handle XSI_PCE1_PORT */
+- airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500);
+ airoha_fe_rmw(eth, REG_FE_WAN_PORT,
+ WAN1_EN_MASK | WAN1_MASK | WAN0_MASK,
+ FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT));
+@@ -2107,6 +2125,125 @@ static int airoha_tc_setup_qdisc_ets(str
+ }
+ }
+
++static int airoha_qdma_get_rl_param(struct airoha_qdma *qdma, int queue_id,
++ u32 addr, enum trtcm_param_type param,
++ u32 *val_low, u32 *val_high)
++{
++ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
++ u32 val, config = FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
++ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
++ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
++
++ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
++ if (read_poll_timeout(airoha_qdma_rr, val,
++ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
++ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true, qdma,
++ REG_TRTCM_CFG_PARAM(addr)))
++ return -ETIMEDOUT;
++
++ *val_low = airoha_qdma_rr(qdma, REG_TRTCM_DATA_LOW(addr));
++ if (val_high)
++ *val_high = airoha_qdma_rr(qdma, REG_TRTCM_DATA_HIGH(addr));
++
++ return 0;
++}
++
++static int airoha_qdma_set_rl_param(struct airoha_qdma *qdma, int queue_id,
++ u32 addr, enum trtcm_param_type param,
++ u32 val)
++{
++ u32 idx = QDMA_METER_IDX(queue_id), group = QDMA_METER_GROUP(queue_id);
++ u32 config = RATE_LIMIT_PARAM_RW_MASK |
++ FIELD_PREP(RATE_LIMIT_PARAM_TYPE_MASK, param) |
++ FIELD_PREP(RATE_LIMIT_METER_GROUP_MASK, group) |
++ FIELD_PREP(RATE_LIMIT_PARAM_INDEX_MASK, idx);
++
++ airoha_qdma_wr(qdma, REG_TRTCM_DATA_LOW(addr), val);
++ airoha_qdma_wr(qdma, REG_TRTCM_CFG_PARAM(addr), config);
++
++ return read_poll_timeout(airoha_qdma_rr, val,
++ val & RATE_LIMIT_PARAM_RW_DONE_MASK,
++ USEC_PER_MSEC, 10 * USEC_PER_MSEC, true,
++ qdma, REG_TRTCM_CFG_PARAM(addr));
++}
++
++static int airoha_qdma_set_rl_config(struct airoha_qdma *qdma, int queue_id,
++ u32 addr, bool enable, u32 enable_mask)
++{
++ u32 val;
++ int err;
++
++ err = airoha_qdma_get_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
++ &val, NULL);
++ if (err)
++ return err;
++
++ val = enable ? val | enable_mask : val & ~enable_mask;
++
++ return airoha_qdma_set_rl_param(qdma, queue_id, addr, TRTCM_MISC_MODE,
++ val);
++}
++
++static int airoha_qdma_set_rl_token_bucket(struct airoha_qdma *qdma,
++ int queue_id, u32 rate_val,
++ u32 bucket_size)
++{
++ u32 val, config, tick, unit, rate, rate_frac;
++ int err;
++
++ err = airoha_qdma_get_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
++ TRTCM_MISC_MODE, &config, NULL);
++ if (err)
++ return err;
++
++ val = airoha_qdma_rr(qdma, REG_INGRESS_TRTCM_CFG);
++ tick = FIELD_GET(INGRESS_FAST_TICK_MASK, val);
++ if (config & TRTCM_TICK_SEL)
++ tick *= FIELD_GET(INGRESS_SLOW_TICK_RATIO_MASK, val);
++ if (!tick)
++ return -EINVAL;
++
++ unit = (config & TRTCM_PKT_MODE) ? 1000000 / tick : 8000 / tick;
++ if (!unit)
++ return -EINVAL;
++
++ rate = rate_val / unit;
++ rate_frac = rate_val % unit;
++ rate_frac = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate_frac) / unit;
++ rate = FIELD_PREP(TRTCM_TOKEN_RATE_MASK, rate) |
++ FIELD_PREP(TRTCM_TOKEN_RATE_FRACTION_MASK, rate_frac);
++
++ err = airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
++ TRTCM_TOKEN_RATE_MODE, rate);
++ if (err)
++ return err;
++
++ val = bucket_size;
++ if (!(config & TRTCM_PKT_MODE))
++ val = max_t(u32, val, MIN_TOKEN_SIZE);
++ val = min_t(u32, __fls(val), MAX_TOKEN_SIZE_OFFSET);
++
++ return airoha_qdma_set_rl_param(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
++ TRTCM_BUCKETSIZE_SHIFT_MODE, val);
++}
++
++static int airoha_qdma_init_rl_config(struct airoha_qdma *qdma, int queue_id,
++ bool enable, enum trtcm_unit_type unit)
++{
++ bool tick_sel = queue_id == 0 || queue_id == 2 || queue_id == 8;
++ enum trtcm_param mode = TRTCM_METER_MODE;
++ int err;
++
++ mode |= unit == TRTCM_PACKET_UNIT ? TRTCM_PKT_MODE : 0;
++ err = airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
++ enable, mode);
++ if (err)
++ return err;
++
++ return airoha_qdma_set_rl_config(qdma, queue_id, REG_INGRESS_TRTCM_CFG,
++ tick_sel, TRTCM_TICK_SEL);
++}
++
+ static int airoha_qdma_get_trtcm_param(struct airoha_qdma *qdma, int channel,
+ u32 addr, enum trtcm_param_type param,
+ enum trtcm_mode_type mode,
+@@ -2271,10 +2408,142 @@ static int airoha_tc_htb_alloc_leaf_queu
+ return 0;
+ }
+
++static int airoha_qdma_set_rx_meter(struct airoha_gdm_port *port,
++ u32 rate, u32 bucket_size,
++ enum trtcm_unit_type unit_type)
++{
++ struct airoha_qdma *qdma = port->qdma;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
++ int err;
++
++ if (!qdma->q_rx[i].ndesc)
++ continue;
++
++ err = airoha_qdma_init_rl_config(qdma, i, !!rate, unit_type);
++ if (err)
++ return err;
++
++ err = airoha_qdma_set_rl_token_bucket(qdma, i, rate,
++ bucket_size);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
++static int airoha_tc_matchall_act_validate(struct tc_cls_matchall_offload *f)
++{
++ const struct flow_action *actions = &f->rule->action;
++ const struct flow_action_entry *act;
++
++ if (!flow_action_has_entries(actions)) {
++ NL_SET_ERR_MSG_MOD(f->common.extack,
++ "filter run with no actions");
++ return -EINVAL;
++ }
++
++ if (!flow_offload_has_one_action(actions)) {
++ NL_SET_ERR_MSG_MOD(f->common.extack,
++ "only once action per filter is supported");
++ return -EOPNOTSUPP;
++ }
++
++ act = &actions->entries[0];
++ if (act->id != FLOW_ACTION_POLICE) {
++ NL_SET_ERR_MSG_MOD(f->common.extack, "unsupported action");
++ return -EOPNOTSUPP;
++ }
++
++ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
++ NL_SET_ERR_MSG_MOD(f->common.extack,
++ "invalid exceed action id");
++ return -EOPNOTSUPP;
++ }
++
++ if (act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
++ NL_SET_ERR_MSG_MOD(f->common.extack,
++ "invalid notexceed action id");
++ return -EOPNOTSUPP;
++ }
++
++ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
++ !flow_action_is_last_entry(actions, act)) {
++ NL_SET_ERR_MSG_MOD(f->common.extack,
++ "action accept must be last");
++ return -EOPNOTSUPP;
++ }
++
++ if (act->police.peakrate_bytes_ps || act->police.avrate ||
++ act->police.overhead || act->police.mtu) {
++ NL_SET_ERR_MSG_MOD(f->common.extack,
++ "peakrate/avrate/overhead/mtu unsupported");
++ return -EOPNOTSUPP;
++ }
++
++ return 0;
++}
++
++static int airoha_dev_tc_matchall(struct net_device *dev,
++ struct tc_cls_matchall_offload *f)
++{
++ enum trtcm_unit_type unit_type = TRTCM_BYTE_UNIT;
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ u32 rate = 0, bucket_size = 0;
++
++ switch (f->command) {
++ case TC_CLSMATCHALL_REPLACE: {
++ const struct flow_action_entry *act;
++ int err;
++
++ err = airoha_tc_matchall_act_validate(f);
++ if (err)
++ return err;
++
++ act = &f->rule->action.entries[0];
++ if (act->police.rate_pkt_ps) {
++ rate = act->police.rate_pkt_ps;
++ bucket_size = act->police.burst_pkt;
++ unit_type = TRTCM_PACKET_UNIT;
++ } else {
++ rate = div_u64(act->police.rate_bytes_ps, 1000);
++ rate = rate << 3; /* Kbps */
++ bucket_size = act->police.burst;
++ }
++ fallthrough;
++ }
++ case TC_CLSMATCHALL_DESTROY:
++ return airoha_qdma_set_rx_meter(port, rate, bucket_size,
++ unit_type);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int airoha_dev_setup_tc_block_cb(enum tc_setup_type type,
++ void *type_data, void *cb_priv)
++{
++ struct net_device *dev = cb_priv;
++
++ if (!tc_can_offload(dev))
++ return -EOPNOTSUPP;
++
++ switch (type) {
++ case TC_SETUP_CLSFLOWER:
++ return airoha_ppe_setup_tc_block_cb(dev, type_data);
++ case TC_SETUP_CLSMATCHALL:
++ return airoha_dev_tc_matchall(dev, type_data);
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
+ static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port,
+ struct flow_block_offload *f)
+ {
+- flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb;
++ flow_setup_cb_t *cb = airoha_dev_setup_tc_block_cb;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -127,6 +127,11 @@ enum tx_sched_mode {
+ TC_SCH_WRR2,
+ };
+
++enum trtcm_unit_type {
++ TRTCM_BYTE_UNIT,
++ TRTCM_PACKET_UNIT,
++};
++
+ enum trtcm_param_type {
+ TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */
+ TRTCM_TOKEN_RATE_MODE,
+@@ -554,8 +559,7 @@ bool airoha_is_valid_gdm_port(struct air
+
+ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash);
+-int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+- void *cb_priv);
++int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
+ int airoha_ppe_init(struct airoha_eth *eth);
+ void airoha_ppe_deinit(struct airoha_eth *eth);
+ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -967,18 +967,13 @@ error_npu_put:
+ return err;
+ }
+
+-int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+- void *cb_priv)
++int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
+ {
+- struct flow_cls_offload *cls = type_data;
+- struct net_device *dev = cb_priv;
+ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct flow_cls_offload *cls = type_data;
+ struct airoha_eth *eth = port->qdma->eth;
+ int err = 0;
+
+- if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER)
+- return -EOPNOTSUPP;
+-
+ mutex_lock(&flow_offload_mutex);
+
+ if (!eth->npu)
+--- a/drivers/net/ethernet/airoha/airoha_regs.h
++++ b/drivers/net/ethernet/airoha/airoha_regs.h
+@@ -283,6 +283,7 @@
+ #define PPE_HASH_SEED 0x12345678
+
+ #define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248)
++#define DFT_CPORT_MASK(_n) GENMASK(3 + ((_n) << 2), ((_n) << 2))
+
+ #define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c)
+
+@@ -691,6 +692,12 @@
+ #define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8)
+ #define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc)
+
++#define RATE_LIMIT_PARAM_RW_MASK BIT(31)
++#define RATE_LIMIT_PARAM_RW_DONE_MASK BIT(30)
++#define RATE_LIMIT_PARAM_TYPE_MASK GENMASK(29, 28)
++#define RATE_LIMIT_METER_GROUP_MASK GENMASK(27, 26)
++#define RATE_LIMIT_PARAM_INDEX_MASK GENMASK(23, 16)
++
+ #define REG_TXWRR_MODE_CFG 0x1020
+ #define TWRR_WEIGHT_SCALE_MASK BIT(31)
+ #define TWRR_WEIGHT_BASE_MASK BIT(3)
--- /dev/null
+From 9439db26d3ee4a897e5cd108864172531f31ce07 Mon Sep 17 00:00:00 2001
+Date: Fri, 18 Apr 2025 12:40:49 +0200
+Subject: [PATCH 1/2] net: airoha: Introduce airoha_irq_bank struct
+
+The EN7581 ethernet SoC supports 4 programmable IRQ lines, each one
+composed of 4 IRQ configuration registers. Add the airoha_irq_bank
+struct as a container for the info of each independent IRQ line (e.g.
+IRQ number, enabled source interrupts, etc.). This is a preliminary
+patch to support multiple IRQ lines in the airoha_eth driver.
+
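+Each bank addresses its own copy of the IRQ configuration registers; as
+a worked example with the updated REG_INT_ENABLE() macro:
+
+	REG_INT_ENABLE(0, 2) = 0x0740
+	REG_INT_ENABLE(1, 2) = 0x0740 + (1 << 5) = 0x0760
+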
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 106 ++++++++++++++--------
+ drivers/net/ethernet/airoha/airoha_eth.h | 13 ++-
+ drivers/net/ethernet/airoha/airoha_regs.h | 11 ++-
+ 3 files changed, 86 insertions(+), 44 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -34,37 +34,40 @@ u32 airoha_rmw(void __iomem *base, u32 o
+ return val;
+ }
+
+-static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
+- u32 clear, u32 set)
++static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
++ int index, u32 clear, u32 set)
+ {
++ struct airoha_qdma *qdma = irq_bank->qdma;
++ int bank = irq_bank - &qdma->irq_banks[0];
+ unsigned long flags;
+
+- if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
++ if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
+ return;
+
+- spin_lock_irqsave(&qdma->irq_lock, flags);
++ spin_lock_irqsave(&irq_bank->irq_lock, flags);
+
+- qdma->irqmask[index] &= ~clear;
+- qdma->irqmask[index] |= set;
+- airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
++ irq_bank->irqmask[index] &= ~clear;
++ irq_bank->irqmask[index] |= set;
++ airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
++ irq_bank->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+- airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
++ airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
+
+- spin_unlock_irqrestore(&qdma->irq_lock, flags);
++ spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
+ }
+
+-static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
+- u32 mask)
++static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
++ int index, u32 mask)
+ {
+- airoha_qdma_set_irqmask(qdma, index, 0, mask);
++ airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
+ }
+
+-static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
+- u32 mask)
++static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
++ int index, u32 mask)
+ {
+- airoha_qdma_set_irqmask(qdma, index, mask, 0);
++ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
+ }
+
+ static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+@@ -732,6 +735,7 @@ free_frag:
+ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+ {
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
++ struct airoha_irq_bank *irq_bank = &q->qdma->irq_banks[0];
+ int cur, done = 0;
+
+ do {
+@@ -740,7 +744,7 @@ static int airoha_qdma_rx_napi_poll(stru
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi))
+- airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
++ airoha_qdma_irq_enable(irq_bank, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ return done;
+@@ -945,7 +949,7 @@ unlock:
+ }
+
+ if (done < budget && napi_complete(napi))
+- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
++ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(id));
+
+ return done;
+@@ -1176,13 +1180,16 @@ static int airoha_qdma_hw_init(struct ai
+ int i;
+
+ /* clear pending irqs */
+- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
++ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks[0].irqmask); i++)
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+
+ /* setup irqs */
+- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+- airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
++ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
++ INT_IDX0_MASK);
++ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX1,
++ INT_IDX1_MASK);
++ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
++ INT_IDX4_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+@@ -1227,13 +1234,14 @@ static int airoha_qdma_hw_init(struct ai
+
+ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+ {
+- struct airoha_qdma *qdma = dev_instance;
+- u32 intr[ARRAY_SIZE(qdma->irqmask)];
++ struct airoha_irq_bank *irq_bank = dev_instance;
++ struct airoha_qdma *qdma = irq_bank->qdma;
++ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
++ for (i = 0; i < ARRAY_SIZE(intr); i++) {
+ intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
+- intr[i] &= qdma->irqmask[i];
++ intr[i] &= irq_bank->irqmask[i];
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
+ }
+
+@@ -1241,7 +1249,7 @@ static irqreturn_t airoha_irq_handler(in
+ return IRQ_NONE;
+
+ if (intr[1] & RX_DONE_INT_MASK) {
+- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
++ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+@@ -1258,7 +1266,7 @@ static irqreturn_t airoha_irq_handler(in
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+ continue;
+
+- airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
++ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+ napi_schedule(&qdma->q_tx_irq[i].napi);
+ }
+@@ -1267,6 +1275,39 @@ static irqreturn_t airoha_irq_handler(in
+ return IRQ_HANDLED;
+ }
+
++static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
++ struct airoha_qdma *qdma)
++{
++ struct airoha_eth *eth = qdma->eth;
++ int i, id = qdma - &eth->qdma[0];
++
++ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
++ struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
++ int err, irq_index = 4 * id + i;
++ const char *name;
++
++ spin_lock_init(&irq_bank->irq_lock);
++ irq_bank->qdma = qdma;
++
++ irq_bank->irq = platform_get_irq(pdev, irq_index);
++ if (irq_bank->irq < 0)
++ return irq_bank->irq;
++
++ name = devm_kasprintf(eth->dev, GFP_KERNEL,
++ KBUILD_MODNAME ".%d", irq_index);
++ if (!name)
++ return -ENOMEM;
++
++ err = devm_request_irq(eth->dev, irq_bank->irq,
++ airoha_irq_handler, IRQF_SHARED, name,
++ irq_bank);
++ if (err)
++ return err;
++ }
++
++ return 0;
++}
++
+ static int airoha_qdma_init(struct platform_device *pdev,
+ struct airoha_eth *eth,
+ struct airoha_qdma *qdma)
+@@ -1274,9 +1315,7 @@ static int airoha_qdma_init(struct platf
+ int err, id = qdma - &eth->qdma[0];
+ const char *res;
+
+- spin_lock_init(&qdma->irq_lock);
+ qdma->eth = eth;
+-
+ res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
+ if (!res)
+ return -ENOMEM;
+@@ -1286,12 +1325,7 @@ static int airoha_qdma_init(struct platf
+ return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
+ "failed to iomap qdma%d regs\n", id);
+
+- qdma->irq = platform_get_irq(pdev, 4 * id);
+- if (qdma->irq < 0)
+- return qdma->irq;
+-
+- err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
+- IRQF_SHARED, KBUILD_MODNAME, qdma);
++ err = airoha_qdma_init_irq_banks(pdev, qdma);
+ if (err)
+ return err;
+
+@@ -2782,7 +2816,7 @@ static int airoha_alloc_gdm_port(struct
+ dev->features |= dev->hw_features;
+ dev->vlan_features = dev->hw_features;
+ dev->dev.of_node = np;
+- dev->irq = qdma->irq;
++ dev->irq = qdma->irq_banks[0].irq;
+ SET_NETDEV_DEV(dev, eth->dev);
+
+ /* reserve hw queues for HTB offloading */
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -17,6 +17,7 @@
+
+ #define AIROHA_MAX_NUM_GDM_PORTS 4
+ #define AIROHA_MAX_NUM_QDMA 2
++#define AIROHA_MAX_NUM_IRQ_BANKS 1
+ #define AIROHA_MAX_DSA_PORTS 7
+ #define AIROHA_MAX_NUM_RSTS 3
+ #define AIROHA_MAX_NUM_XSI_RSTS 5
+@@ -452,17 +453,23 @@ struct airoha_flow_table_entry {
+ unsigned long cookie;
+ };
+
+-struct airoha_qdma {
+- struct airoha_eth *eth;
+- void __iomem *regs;
++struct airoha_irq_bank {
++ struct airoha_qdma *qdma;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
++};
++
++struct airoha_qdma {
++ struct airoha_eth *eth;
++ void __iomem *regs;
+
+ atomic_t users;
+
++ struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
++
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+--- a/drivers/net/ethernet/airoha/airoha_regs.h
++++ b/drivers/net/ethernet/airoha/airoha_regs.h
+@@ -423,11 +423,12 @@
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+-#define REG_INT_ENABLE(_n) \
+- (((_n) == 4) ? 0x0750 : \
+- ((_n) == 3) ? 0x0744 : \
+- ((_n) == 2) ? 0x0740 : \
+- ((_n) == 1) ? 0x002c : 0x0028)
++#define REG_INT_ENABLE(_b, _n) \
++ (((_n) == 4) ? 0x0750 + ((_b) << 5) : \
++ ((_n) == 3) ? 0x0744 + ((_b) << 5) : \
++ ((_n) == 2) ? 0x0740 + ((_b) << 5) : \
++ ((_n) == 1) ? 0x002c + ((_b) << 3) : \
++ 0x0028 + ((_b) << 3))
+
+ /* QDMA_CSR_INT_ENABLE1 */
+ #define RX15_COHERENT_INT_MASK BIT(31)
--- /dev/null
+From f252493e1835366fc25ce631c3056f900977dd11 Mon Sep 17 00:00:00 2001
+Date: Fri, 18 Apr 2025 12:40:50 +0200
+Subject: [PATCH 2/2] net: airoha: Enable multiple IRQ lines support in
+ airoha_eth driver.
+
+The EN7581 ethernet SoC supports 4 programmable IRQ lines for Tx and Rx
+interrupts. Enable support for multiple IRQ lines and map the Rx/Tx
+queues to the available IRQ lines using the default scheme used in the
+vendor SDK:
+
+- IRQ0: rx queues [0-4],[7-9],15
+- IRQ1: rx queues [21-30]
+- IRQ2: rx queue 5
+- IRQ3: rx queue 6
+
+Tx queue interrupts are managed by IRQ0.
+
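+As an illustration, a hypothetical helper (not part of this patch)
+resolving which IRQ bank serves a given rx queue, based on the
+RX_IRQ_BANK_PIN_MASK() macro introduced below:
+
+	/* Return the IRQ bank index serving rx queue @qid, or -1. */
+	static int airoha_rx_queue_to_irq_bank(int qid)
+	{
+		int i;
+
+		for (i = 0; i < AIROHA_MAX_NUM_IRQ_BANKS; i++) {
+			if (RX_IRQ_BANK_PIN_MASK(i) & BIT(qid))
+				return i;
+		}
+
+		return -1;
+	}
+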
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 67 +++++---
+ drivers/net/ethernet/airoha/airoha_eth.h | 13 +-
+ drivers/net/ethernet/airoha/airoha_regs.h | 185 +++++++++++++++++-----
+ 3 files changed, 206 insertions(+), 59 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -735,7 +735,6 @@ free_frag:
+ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+ {
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+- struct airoha_irq_bank *irq_bank = &q->qdma->irq_banks[0];
+ int cur, done = 0;
+
+ do {
+@@ -743,9 +742,20 @@ static int airoha_qdma_rx_napi_poll(stru
+ done += cur;
+ } while (cur && done < budget);
+
+- if (done < budget && napi_complete(napi))
+- airoha_qdma_irq_enable(irq_bank, QDMA_INT_REG_IDX1,
+- RX_DONE_INT_MASK);
++ if (done < budget && napi_complete(napi)) {
++ struct airoha_qdma *qdma = q->qdma;
++ int i, qid = q - &qdma->q_rx[0];
++ int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
++ : QDMA_INT_REG_IDX2;
++
++ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
++ if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
++ continue;
++
++ airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
++ BIT(qid % RX_DONE_HIGH_OFFSET));
++ }
++ }
+
+ return done;
+ }
+@@ -1179,17 +1189,24 @@ static int airoha_qdma_hw_init(struct ai
+ {
+ int i;
+
+- /* clear pending irqs */
+- for (i = 0; i < ARRAY_SIZE(qdma->irq_banks[0].irqmask); i++)
++ for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
++ /* clear pending irqs */
+ airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
+-
+- /* setup irqs */
++ /* setup rx irqs */
++ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
++ INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
++ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
++ INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
++ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
++ INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
++ airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
++ INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
++ }
++ /* setup tx irqs */
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
+- INT_IDX0_MASK);
+- airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX1,
+- INT_IDX1_MASK);
++ TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
+ airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
+- INT_IDX4_MASK);
++ TX_COHERENT_HIGH_INT_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
+@@ -1236,6 +1253,7 @@ static irqreturn_t airoha_irq_handler(in
+ {
+ struct airoha_irq_bank *irq_bank = dev_instance;
+ struct airoha_qdma *qdma = irq_bank->qdma;
++ u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
+ u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
+ int i;
+
+@@ -1248,17 +1266,24 @@ static irqreturn_t airoha_irq_handler(in
+ if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
+ return IRQ_NONE;
+
+- if (intr[1] & RX_DONE_INT_MASK) {
+- airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1,
+- RX_DONE_INT_MASK);
++ rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
++ if (rx_intr1) {
++ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
++ rx_intr_mask |= rx_intr1;
++ }
+
+- for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
+- if (!qdma->q_rx[i].ndesc)
+- continue;
++ rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
++ if (rx_intr2) {
++ airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
++ rx_intr_mask |= (rx_intr2 << 16);
++ }
+
+- if (intr[1] & BIT(i))
+- napi_schedule(&qdma->q_rx[i].napi);
+- }
++ for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
++ if (!qdma->q_rx[i].ndesc)
++ continue;
++
++ if (rx_intr_mask & BIT(i))
++ napi_schedule(&qdma->q_rx[i].napi);
+ }
+
+ if (intr[0] & INT_TX_MASK) {
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -17,7 +17,7 @@
+
+ #define AIROHA_MAX_NUM_GDM_PORTS 4
+ #define AIROHA_MAX_NUM_QDMA 2
+-#define AIROHA_MAX_NUM_IRQ_BANKS 1
++#define AIROHA_MAX_NUM_IRQ_BANKS 4
+ #define AIROHA_MAX_DSA_PORTS 7
+ #define AIROHA_MAX_NUM_RSTS 3
+ #define AIROHA_MAX_NUM_XSI_RSTS 5
+@@ -453,6 +453,17 @@ struct airoha_flow_table_entry {
+ unsigned long cookie;
+ };
+
++/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
++#define RX_IRQ0_BANK_PIN_MASK 0x839f
++#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
++#define RX_IRQ2_BANK_PIN_MASK 0x20
++#define RX_IRQ3_BANK_PIN_MASK 0x40
++#define RX_IRQ_BANK_PIN_MASK(_n) \
++ (((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
++ ((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
++ ((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
++ RX_IRQ0_BANK_PIN_MASK)
++
+ struct airoha_irq_bank {
+ struct airoha_qdma *qdma;
+
+--- a/drivers/net/ethernet/airoha/airoha_regs.h
++++ b/drivers/net/ethernet/airoha/airoha_regs.h
+@@ -463,6 +463,26 @@
+ #define IRQ0_FULL_INT_MASK BIT(1)
+ #define IRQ0_INT_MASK BIT(0)
+
++#define RX_COHERENT_LOW_INT_MASK \
++ (RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
++ RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
++ RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK | \
++ RX9_COHERENT_INT_MASK | RX8_COHERENT_INT_MASK | \
++ RX7_COHERENT_INT_MASK | RX6_COHERENT_INT_MASK | \
++ RX5_COHERENT_INT_MASK | RX4_COHERENT_INT_MASK | \
++ RX3_COHERENT_INT_MASK | RX2_COHERENT_INT_MASK | \
++ RX1_COHERENT_INT_MASK | RX0_COHERENT_INT_MASK)
++
++#define RX_COHERENT_LOW_OFFSET __ffs(RX_COHERENT_LOW_INT_MASK)
++#define INT_RX0_MASK(_n) \
++ (((_n) << RX_COHERENT_LOW_OFFSET) & RX_COHERENT_LOW_INT_MASK)
++
++#define TX_COHERENT_LOW_INT_MASK \
++ (TX7_COHERENT_INT_MASK | TX6_COHERENT_INT_MASK | \
++ TX5_COHERENT_INT_MASK | TX4_COHERENT_INT_MASK | \
++ TX3_COHERENT_INT_MASK | TX2_COHERENT_INT_MASK | \
++ TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
++
+ #define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+@@ -471,17 +491,6 @@
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+-#define INT_IDX0_MASK \
+- (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
+- TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
+- TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
+- TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
+- RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
+- RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
+- RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
+- RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
+- RX15_COHERENT_INT_MASK | INT_TX_MASK)
+-
+ /* QDMA_CSR_INT_ENABLE2 */
+ #define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+ #define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+@@ -516,19 +525,121 @@
+ #define RX1_DONE_INT_MASK BIT(1)
+ #define RX0_DONE_INT_MASK BIT(0)
+
+-#define RX_DONE_INT_MASK \
+- (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
+- RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
+- RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
+- RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
+- RX15_DONE_INT_MASK)
+-#define INT_IDX1_MASK \
+- (RX_DONE_INT_MASK | \
+- RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
+- RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
+- RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
+- RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
+- RX15_NO_CPU_DSCP_INT_MASK)
++#define RX_NO_CPU_DSCP_LOW_INT_MASK \
++ (RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
++ RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
++ RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK | \
++ RX9_NO_CPU_DSCP_INT_MASK | RX8_NO_CPU_DSCP_INT_MASK | \
++ RX7_NO_CPU_DSCP_INT_MASK | RX6_NO_CPU_DSCP_INT_MASK | \
++ RX5_NO_CPU_DSCP_INT_MASK | RX4_NO_CPU_DSCP_INT_MASK | \
++ RX3_NO_CPU_DSCP_INT_MASK | RX2_NO_CPU_DSCP_INT_MASK | \
++ RX1_NO_CPU_DSCP_INT_MASK | RX0_NO_CPU_DSCP_INT_MASK)
++
++#define RX_DONE_LOW_INT_MASK \
++ (RX15_DONE_INT_MASK | RX14_DONE_INT_MASK | \
++ RX13_DONE_INT_MASK | RX12_DONE_INT_MASK | \
++ RX11_DONE_INT_MASK | RX10_DONE_INT_MASK | \
++ RX9_DONE_INT_MASK | RX8_DONE_INT_MASK | \
++ RX7_DONE_INT_MASK | RX6_DONE_INT_MASK | \
++ RX5_DONE_INT_MASK | RX4_DONE_INT_MASK | \
++ RX3_DONE_INT_MASK | RX2_DONE_INT_MASK | \
++ RX1_DONE_INT_MASK | RX0_DONE_INT_MASK)
++
++#define RX_NO_CPU_DSCP_LOW_OFFSET __ffs(RX_NO_CPU_DSCP_LOW_INT_MASK)
++#define INT_RX1_MASK(_n) \
++ ((((_n) << RX_NO_CPU_DSCP_LOW_OFFSET) & RX_NO_CPU_DSCP_LOW_INT_MASK) | \
++ (RX_DONE_LOW_INT_MASK & (_n)))
++
++/* QDMA_CSR_INT_ENABLE3 */
++#define RX31_NO_CPU_DSCP_INT_MASK BIT(31)
++#define RX30_NO_CPU_DSCP_INT_MASK BIT(30)
++#define RX29_NO_CPU_DSCP_INT_MASK BIT(29)
++#define RX28_NO_CPU_DSCP_INT_MASK BIT(28)
++#define RX27_NO_CPU_DSCP_INT_MASK BIT(27)
++#define RX26_NO_CPU_DSCP_INT_MASK BIT(26)
++#define RX25_NO_CPU_DSCP_INT_MASK BIT(25)
++#define RX24_NO_CPU_DSCP_INT_MASK BIT(24)
++#define RX23_NO_CPU_DSCP_INT_MASK BIT(23)
++#define RX22_NO_CPU_DSCP_INT_MASK BIT(22)
++#define RX21_NO_CPU_DSCP_INT_MASK BIT(21)
++#define RX20_NO_CPU_DSCP_INT_MASK BIT(20)
++#define RX19_NO_CPU_DSCP_INT_MASK BIT(19)
++#define RX18_NO_CPU_DSCP_INT_MASK BIT(18)
++#define RX17_NO_CPU_DSCP_INT_MASK BIT(17)
++#define RX16_NO_CPU_DSCP_INT_MASK BIT(16)
++#define RX31_DONE_INT_MASK BIT(15)
++#define RX30_DONE_INT_MASK BIT(14)
++#define RX29_DONE_INT_MASK BIT(13)
++#define RX28_DONE_INT_MASK BIT(12)
++#define RX27_DONE_INT_MASK BIT(11)
++#define RX26_DONE_INT_MASK BIT(10)
++#define RX25_DONE_INT_MASK BIT(9)
++#define RX24_DONE_INT_MASK BIT(8)
++#define RX23_DONE_INT_MASK BIT(7)
++#define RX22_DONE_INT_MASK BIT(6)
++#define RX21_DONE_INT_MASK BIT(5)
++#define RX20_DONE_INT_MASK BIT(4)
++#define RX19_DONE_INT_MASK BIT(3)
++#define RX18_DONE_INT_MASK BIT(2)
++#define RX17_DONE_INT_MASK BIT(1)
++#define RX16_DONE_INT_MASK BIT(0)
++
++#define RX_NO_CPU_DSCP_HIGH_INT_MASK \
++ (RX31_NO_CPU_DSCP_INT_MASK | RX30_NO_CPU_DSCP_INT_MASK | \
++ RX29_NO_CPU_DSCP_INT_MASK | RX28_NO_CPU_DSCP_INT_MASK | \
++ RX27_NO_CPU_DSCP_INT_MASK | RX26_NO_CPU_DSCP_INT_MASK | \
++ RX25_NO_CPU_DSCP_INT_MASK | RX24_NO_CPU_DSCP_INT_MASK | \
++ RX23_NO_CPU_DSCP_INT_MASK | RX22_NO_CPU_DSCP_INT_MASK | \
++ RX21_NO_CPU_DSCP_INT_MASK | RX20_NO_CPU_DSCP_INT_MASK | \
++ RX19_NO_CPU_DSCP_INT_MASK | RX18_NO_CPU_DSCP_INT_MASK | \
++ RX17_NO_CPU_DSCP_INT_MASK | RX16_NO_CPU_DSCP_INT_MASK)
++
++#define RX_DONE_HIGH_INT_MASK \
++ (RX31_DONE_INT_MASK | RX30_DONE_INT_MASK | \
++ RX29_DONE_INT_MASK | RX28_DONE_INT_MASK | \
++ RX27_DONE_INT_MASK | RX26_DONE_INT_MASK | \
++ RX25_DONE_INT_MASK | RX24_DONE_INT_MASK | \
++ RX23_DONE_INT_MASK | RX22_DONE_INT_MASK | \
++ RX21_DONE_INT_MASK | RX20_DONE_INT_MASK | \
++ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
++ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
++
++#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
++#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
++
++#define INT_RX2_MASK(_n) \
++ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
++ (((_n) >> RX_DONE_HIGH_OFFSET) & RX_DONE_HIGH_INT_MASK))
++
++/* QDMA_CSR_INT_ENABLE4 */
++#define RX31_COHERENT_INT_MASK BIT(31)
++#define RX30_COHERENT_INT_MASK BIT(30)
++#define RX29_COHERENT_INT_MASK BIT(29)
++#define RX28_COHERENT_INT_MASK BIT(28)
++#define RX27_COHERENT_INT_MASK BIT(27)
++#define RX26_COHERENT_INT_MASK BIT(26)
++#define RX25_COHERENT_INT_MASK BIT(25)
++#define RX24_COHERENT_INT_MASK BIT(24)
++#define RX23_COHERENT_INT_MASK BIT(23)
++#define RX22_COHERENT_INT_MASK BIT(22)
++#define RX21_COHERENT_INT_MASK BIT(21)
++#define RX20_COHERENT_INT_MASK BIT(20)
++#define RX19_COHERENT_INT_MASK BIT(19)
++#define RX18_COHERENT_INT_MASK BIT(18)
++#define RX17_COHERENT_INT_MASK BIT(17)
++#define RX16_COHERENT_INT_MASK BIT(16)
++
++#define RX_COHERENT_HIGH_INT_MASK \
++ (RX31_COHERENT_INT_MASK | RX30_COHERENT_INT_MASK | \
++ RX29_COHERENT_INT_MASK | RX28_COHERENT_INT_MASK | \
++ RX27_COHERENT_INT_MASK | RX26_COHERENT_INT_MASK | \
++ RX25_COHERENT_INT_MASK | RX24_COHERENT_INT_MASK | \
++ RX23_COHERENT_INT_MASK | RX22_COHERENT_INT_MASK | \
++ RX21_COHERENT_INT_MASK | RX20_COHERENT_INT_MASK | \
++ RX19_COHERENT_INT_MASK | RX18_COHERENT_INT_MASK | \
++ RX17_COHERENT_INT_MASK | RX16_COHERENT_INT_MASK)
++
++#define INT_RX3_MASK(_n) (RX_COHERENT_HIGH_INT_MASK & (_n))
+
+ /* QDMA_CSR_INT_ENABLE5 */
+ #define TX31_COHERENT_INT_MASK BIT(31)
+@@ -556,19 +667,19 @@
+ #define TX9_COHERENT_INT_MASK BIT(9)
+ #define TX8_COHERENT_INT_MASK BIT(8)
+
+-#define INT_IDX4_MASK \
+- (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
+- TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
+- TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
+- TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
+- TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
+- TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
+- TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
+- TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
+- TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
+- TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
+- TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
+- TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
++#define TX_COHERENT_HIGH_INT_MASK \
++ (TX31_COHERENT_INT_MASK | TX30_COHERENT_INT_MASK | \
++ TX29_COHERENT_INT_MASK | TX28_COHERENT_INT_MASK | \
++ TX27_COHERENT_INT_MASK | TX26_COHERENT_INT_MASK | \
++ TX25_COHERENT_INT_MASK | TX24_COHERENT_INT_MASK | \
++ TX23_COHERENT_INT_MASK | TX22_COHERENT_INT_MASK | \
++ TX21_COHERENT_INT_MASK | TX20_COHERENT_INT_MASK | \
++ TX19_COHERENT_INT_MASK | TX18_COHERENT_INT_MASK | \
++ TX17_COHERENT_INT_MASK | TX16_COHERENT_INT_MASK | \
++ TX15_COHERENT_INT_MASK | TX14_COHERENT_INT_MASK | \
++ TX13_COHERENT_INT_MASK | TX12_COHERENT_INT_MASK | \
++ TX11_COHERENT_INT_MASK | TX10_COHERENT_INT_MASK | \
++ TX9_COHERENT_INT_MASK | TX8_COHERENT_INT_MASK)
+
+ #define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
--- /dev/null
+From 4a7843cc8a41b9612becccc07715ed017770eb89 Mon Sep 17 00:00:00 2001
+Date: Tue, 6 May 2025 18:56:47 +0200
+Subject: [PATCH] net: airoha: Add missing field to ppe_mbox_data struct
+
+The official Airoha EN7581 firmware requires the max_packet field in the
+ppe_mbox_data struct, while the unofficial one used to develop the
+Airoha EN7581 flowtable support does not.
+This patch does not introduce any real backward-compatibility issue
+since the EN7581 fw is not publicly available in linux-firmware or other
+repositories (e.g. OpenWrt) yet, and the official fw version will use
+this new layout. For this reason this change needs to be backported.
+Moreover, make the padding added by the compiler explicit by introducing
+the rsv array in the init_info struct.
+At the same time, use u32 instead of int for the init_info and set_info
+struct definitions in the ppe_mbox_data struct.
+
+Fixes: 23290c7bc190d ("net: airoha: Introduce Airoha NPU support")
+Link: https://patch.msgid.link/20250506-airoha-en7581-fix-ppe_mbox_data-v5-1-29cabed6864d@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -104,12 +104,14 @@ struct ppe_mbox_data {
+ u8 xpon_hal_api;
+ u8 wan_xsi;
+ u8 ct_joyme4;
+- int ppe_type;
+- int wan_mode;
+- int wan_sel;
++ u8 max_packet;
++ u8 rsv[3];
++ u32 ppe_type;
++ u32 wan_mode;
++ u32 wan_sel;
+ } init_info;
+ struct {
+- int func_id;
++ u32 func_id;
+ u32 size;
+ u32 data;
+ } set_info;
--- /dev/null
+From d6d2b0e1538d5c381ec0ca95afaf772c096ea5dc Mon Sep 17 00:00:00 2001
+Date: Thu, 15 May 2025 08:33:06 +0200
+Subject: [PATCH] net: airoha: Fix page recycling in airoha_qdma_rx_process()
+
+Do not recycle the page twice in the airoha_qdma_rx_process routine in
+case of error. Just run dev_kfree_skb() if the skb has been allocated
+and marked for recycling; run page_pool_put_full_page() directly if the
+skb has not been allocated yet.
+Moreover, rely on the DMA address from the queue entry element instead
+of reading it from the DMA descriptor when DMA syncing in
+airoha_qdma_rx_process().
+
+Fixes: e12182ddb6e71 ("net: airoha: Enable Rx Scatter-Gather")
+Link: https://patch.msgid.link/20250515-airoha-fix-rx-process-error-condition-v2-1-657e92c894b9@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 22 +++++++++-------------
+ 1 file changed, 9 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -636,7 +636,6 @@ static int airoha_qdma_rx_process(struct
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
+- dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ struct page *page = virt_to_head_page(e->buf);
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct airoha_gdm_port *port;
+@@ -645,22 +644,16 @@ static int airoha_qdma_rx_process(struct
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+ break;
+
+- if (!dma_addr)
+- break;
+-
+- len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+- if (!len)
+- break;
+-
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+- dma_sync_single_for_cpu(eth->dev, dma_addr,
++ dma_sync_single_for_cpu(eth->dev, e->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size), dir);
+
++ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ data_len = q->skb ? q->buf_size
+ : SKB_WITH_OVERHEAD(q->buf_size);
+- if (data_len < len)
++ if (!len || data_len < len)
+ goto free_frag;
+
+ p = airoha_qdma_get_gdm_port(eth, desc);
+@@ -723,9 +716,12 @@ static int airoha_qdma_rx_process(struct
+ q->skb = NULL;
+ continue;
+ free_frag:
+- page_pool_put_full_page(q->page_pool, page, true);
+- dev_kfree_skb(q->skb);
+- q->skb = NULL;
++ if (q->skb) {
++ dev_kfree_skb(q->skb);
++ q->skb = NULL;
++ } else {
++ page_pool_put_full_page(q->page_pool, page, true);
++ }
+ }
+ airoha_qdma_fill_rx_queue(q);
+
--- /dev/null
+From c52918744ee1e49cea86622a2633b9782446428f Mon Sep 17 00:00:00 2001
+Date: Fri, 16 May 2025 09:59:59 +0200
+Subject: [PATCH 1/3] net: airoha: npu: Move memory allocation in
+ airoha_npu_send_msg() caller
+
+Move the ppe_mbox_data struct memory allocation from the
+airoha_npu_send_msg routine to its callers. This is a preliminary patch
+to enable wlan NPU offloading and flow counter stats support.
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 126 +++++++++++++----------
+ 1 file changed, 72 insertions(+), 54 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -124,17 +124,12 @@ static int airoha_npu_send_msg(struct ai
+ u16 core = 0; /* FIXME */
+ u32 val, offset = core << 4;
+ dma_addr_t dma_addr;
+- void *addr;
+ int ret;
+
+- addr = kmemdup(p, size, GFP_ATOMIC);
+- if (!addr)
+- return -ENOMEM;
+-
+- dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE);
++ dma_addr = dma_map_single(npu->dev, p, size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(npu->dev, dma_addr);
+ if (ret)
+- goto out;
++ return ret;
+
+ spin_lock_bh(&npu->cores[core].lock);
+
+@@ -155,8 +150,6 @@ static int airoha_npu_send_msg(struct ai
+ spin_unlock_bh(&npu->cores[core].lock);
+
+ dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE);
+-out:
+- kfree(addr);
+
+ return ret;
+ }
+@@ -261,76 +254,101 @@ static irqreturn_t airoha_npu_wdt_handle
+
+ static int airoha_npu_ppe_init(struct airoha_npu *npu)
+ {
+- struct ppe_mbox_data ppe_data = {
+- .func_type = NPU_OP_SET,
+- .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT,
+- .init_info = {
+- .ppe_type = PPE_TYPE_L2B_IPV4_IPV6,
+- .wan_mode = QDMA_WAN_ETHER,
+- },
+- };
++ struct ppe_mbox_data *ppe_data;
++ int err;
+
+- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+- sizeof(struct ppe_mbox_data));
++ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
++ if (!ppe_data)
++ return -ENOMEM;
++
++ ppe_data->func_type = NPU_OP_SET;
++ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT;
++ ppe_data->init_info.ppe_type = PPE_TYPE_L2B_IPV4_IPV6;
++ ppe_data->init_info.wan_mode = QDMA_WAN_ETHER;
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
++ sizeof(*ppe_data));
++ kfree(ppe_data);
++
++ return err;
+ }
+
+ static int airoha_npu_ppe_deinit(struct airoha_npu *npu)
+ {
+- struct ppe_mbox_data ppe_data = {
+- .func_type = NPU_OP_SET,
+- .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+- };
++ struct ppe_mbox_data *ppe_data;
++ int err;
++
++ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
++ if (!ppe_data)
++ return -ENOMEM;
+
+- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+- sizeof(struct ppe_mbox_data));
++ ppe_data->func_type = NPU_OP_SET;
++ ppe_data->func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT;
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
++ sizeof(*ppe_data));
++ kfree(ppe_data);
++
++ return err;
+ }
+
+ static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ int sram_num_entries)
+ {
+- struct ppe_mbox_data ppe_data = {
+- .func_type = NPU_OP_SET,
+- .func_id = PPE_FUNC_SET_WAIT_API,
+- .set_info = {
+- .func_id = PPE_SRAM_RESET_VAL,
+- .data = foe_addr,
+- .size = sram_num_entries,
+- },
+- };
++ struct ppe_mbox_data *ppe_data;
++ int err;
++
++ ppe_data = kzalloc(sizeof(*ppe_data), GFP_KERNEL);
++ if (!ppe_data)
++ return -ENOMEM;
++
++ ppe_data->func_type = NPU_OP_SET;
++ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
++ ppe_data->set_info.func_id = PPE_SRAM_RESET_VAL;
++ ppe_data->set_info.data = foe_addr;
++ ppe_data->set_info.size = sram_num_entries;
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
++ sizeof(*ppe_data));
++ kfree(ppe_data);
+
+- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+- sizeof(struct ppe_mbox_data));
++ return err;
+ }
+
+ static int airoha_npu_foe_commit_entry(struct airoha_npu *npu,
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash, bool ppe2)
+ {
+- struct ppe_mbox_data ppe_data = {
+- .func_type = NPU_OP_SET,
+- .func_id = PPE_FUNC_SET_WAIT_API,
+- .set_info = {
+- .data = foe_addr,
+- .size = entry_size,
+- },
+- };
++ struct ppe_mbox_data *ppe_data;
+ int err;
+
+- ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
+- : PPE_SRAM_SET_ENTRY;
++ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
++ if (!ppe_data)
++ return -ENOMEM;
++
++ ppe_data->func_type = NPU_OP_SET;
++ ppe_data->func_id = PPE_FUNC_SET_WAIT_API;
++ ppe_data->set_info.data = foe_addr;
++ ppe_data->set_info.size = entry_size;
++ ppe_data->set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY
++ : PPE_SRAM_SET_ENTRY;
+
+- err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+- sizeof(struct ppe_mbox_data));
++ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
++ sizeof(*ppe_data));
+ if (err)
+- return err;
++ goto out;
+
+- ppe_data.set_info.func_id = PPE_SRAM_SET_VAL;
+- ppe_data.set_info.data = hash;
+- ppe_data.set_info.size = sizeof(u32);
++ ppe_data->set_info.func_id = PPE_SRAM_SET_VAL;
++ ppe_data->set_info.data = hash;
++ ppe_data->set_info.size = sizeof(u32);
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
++ sizeof(*ppe_data));
++out:
++ kfree(ppe_data);
+
+- return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data,
+- sizeof(struct ppe_mbox_data));
++ return err;
+ }
+
+ struct airoha_npu *airoha_npu_get(struct device *dev)
--- /dev/null
+From b81e0f2b58be37628b2e12f8dffdd63c84573e75 Mon Sep 17 00:00:00 2001
+Date: Fri, 16 May 2025 10:00:00 +0200
+Subject: [PATCH 2/3] net: airoha: Add FLOW_CLS_STATS callback support
+
+Introduce per-flow stats accounting to the flowtable hw offload in
+the airoha_eth driver. Flow stats are split in the PPE and NPU modules:
+- PPE: accounts for high 32bit of per-flow stats
+- NPU: accounts for low 32bit of per-flow stats
+
+FLOW_CLS_STATS can be enabled or disabled at compile time.
+
+---
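+A minimal sketch of how the two 32 bit halves are combined into the
+64 bit counters reported via FLOW_CLS_STATS (hypothetical helper,
+mirroring what airoha_ppe_foe_entry_get_stats() does below):
+
+	/* The PPE accounts the upper 32 bits, the NPU the lower ones. */
+	static u64 airoha_foe_stats64_combine(u32 ppe_hi, u32 npu_lo)
+	{
+		return (u64)ppe_hi << 32 | npu_lo;
+	}
+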
+ drivers/net/ethernet/airoha/Kconfig | 7 +
+ drivers/net/ethernet/airoha/airoha_eth.h | 33 +++
+ drivers/net/ethernet/airoha/airoha_npu.c | 52 +++-
+ drivers/net/ethernet/airoha/airoha_npu.h | 4 +-
+ drivers/net/ethernet/airoha/airoha_ppe.c | 269 ++++++++++++++++--
+ .../net/ethernet/airoha/airoha_ppe_debugfs.c | 9 +-
+ 6 files changed, 354 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/Kconfig
++++ b/drivers/net/ethernet/airoha/Kconfig
+@@ -24,4 +24,11 @@ config NET_AIROHA
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
++config NET_AIROHA_FLOW_STATS
++ default y
++ bool "Airoha flow stats"
++ depends on NET_AIROHA && NET_AIROHA_NPU
++ help
++	  Enable Airoha flowtable statistics counters.
++
+ endif #NET_VENDOR_AIROHA
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -50,6 +50,14 @@
+ #define PPE_NUM 2
+ #define PPE1_SRAM_NUM_ENTRIES (8 * 1024)
+ #define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES)
++#ifdef CONFIG_NET_AIROHA_FLOW_STATS
++#define PPE1_STATS_NUM_ENTRIES (4 * 1024)
++#else
++#define PPE1_STATS_NUM_ENTRIES 0
++#endif /* CONFIG_NET_AIROHA_FLOW_STATS */
++#define PPE_STATS_NUM_ENTRIES (2 * PPE1_STATS_NUM_ENTRIES)
++#define PPE1_SRAM_NUM_DATA_ENTRIES (PPE1_SRAM_NUM_ENTRIES - PPE1_STATS_NUM_ENTRIES)
++#define PPE_SRAM_NUM_DATA_ENTRIES (2 * PPE1_SRAM_NUM_DATA_ENTRIES)
+ #define PPE_DRAM_NUM_ENTRIES (16 * 1024)
+ #define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES)
+ #define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1)
+@@ -261,6 +269,8 @@ struct airoha_foe_mac_info {
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
++
++ u32 meter;
+ };
+
+ #define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24)
+@@ -296,6 +306,11 @@ struct airoha_foe_mac_info {
+ #define AIROHA_FOE_TUNNEL BIT(6)
+ #define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0)
+
++#define AIROHA_FOE_TUNNEL_MTU GENMASK(31, 16)
++#define AIROHA_FOE_ACNT_GRP3 GENMASK(15, 9)
++#define AIROHA_FOE_METER_GRP3 GENMASK(8, 5)
++#define AIROHA_FOE_METER_GRP2 GENMASK(4, 0)
++
+ struct airoha_foe_bridge {
+ u32 dest_mac_hi;
+
+@@ -379,6 +394,8 @@ struct airoha_foe_ipv6 {
+ u32 ib2;
+
+ struct airoha_foe_mac_info_common l2;
++
++ u32 meter;
+ };
+
+ struct airoha_foe_entry {
+@@ -397,6 +414,16 @@ struct airoha_foe_entry {
+ };
+ };
+
++struct airoha_foe_stats {
++ u32 bytes;
++ u32 packets;
++};
++
++struct airoha_foe_stats64 {
++ u64 bytes;
++ u64 packets;
++};
++
+ struct airoha_flow_data {
+ struct ethhdr eth;
+
+@@ -447,6 +474,7 @@ struct airoha_flow_table_entry {
+ struct hlist_node l2_subflow_node; /* PPE L2 subflow entry */
+ u32 hash;
+
++ struct airoha_foe_stats64 stats;
+ enum airoha_flow_entry_type type;
+
+ struct rhash_head node;
+@@ -523,6 +551,9 @@ struct airoha_ppe {
+ struct hlist_head *foe_flow;
+ u16 foe_check_time[PPE_NUM_ENTRIES];
+
++ struct airoha_foe_stats *foe_stats;
++ dma_addr_t foe_stats_dma;
++
+ struct dentry *debugfs_dir;
+ };
+
+@@ -582,6 +613,8 @@ int airoha_ppe_init(struct airoha_eth *e
+ void airoha_ppe_deinit(struct airoha_eth *eth);
+ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
++void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
++ struct airoha_foe_stats64 *stats);
+
+ #ifdef CONFIG_DEBUG_FS
+ int airoha_ppe_debugfs_init(struct airoha_ppe *ppe);
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -12,6 +12,7 @@
+ #include <linux/of_reserved_mem.h>
+ #include <linux/regmap.h>
+
++#include "airoha_eth.h"
+ #include "airoha_npu.h"
+
+ #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+@@ -72,6 +73,7 @@ enum {
+ PPE_FUNC_SET_WAIT_HWNAT_INIT,
+ PPE_FUNC_SET_WAIT_HWNAT_DEINIT,
+ PPE_FUNC_SET_WAIT_API,
++ PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP,
+ };
+
+ enum {
+@@ -115,6 +117,10 @@ struct ppe_mbox_data {
+ u32 size;
+ u32 data;
+ } set_info;
++ struct {
++ u32 npu_stats_addr;
++ u32 foe_stats_addr;
++ } stats_info;
+ };
+ };
+
+@@ -351,7 +357,40 @@ out:
+ return err;
+ }
+
+-struct airoha_npu *airoha_npu_get(struct device *dev)
++static int airoha_npu_stats_setup(struct airoha_npu *npu,
++ dma_addr_t foe_stats_addr)
++{
++ int err, size = PPE_STATS_NUM_ENTRIES * sizeof(*npu->stats);
++ struct ppe_mbox_data *ppe_data;
++
++ if (!size) /* flow stats are disabled */
++ return 0;
++
++ ppe_data = kzalloc(sizeof(*ppe_data), GFP_ATOMIC);
++ if (!ppe_data)
++ return -ENOMEM;
++
++ ppe_data->func_type = NPU_OP_SET;
++ ppe_data->func_id = PPE_FUNC_SET_WAIT_FLOW_STATS_SETUP;
++ ppe_data->stats_info.foe_stats_addr = foe_stats_addr;
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, ppe_data,
++ sizeof(*ppe_data));
++ if (err)
++ goto out;
++
++ npu->stats = devm_ioremap(npu->dev,
++ ppe_data->stats_info.npu_stats_addr,
++ size);
++ if (!npu->stats)
++ err = -ENOMEM;
++out:
++ kfree(ppe_data);
++
++ return err;
++}
++
++struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+ {
+ struct platform_device *pdev;
+ struct device_node *np;
+@@ -389,6 +428,17 @@ struct airoha_npu *airoha_npu_get(struct
+ goto error_module_put;
+ }
+
++ if (stats_addr) {
++ int err;
++
++ err = airoha_npu_stats_setup(npu, *stats_addr);
++ if (err) {
++ dev_err(dev, "failed to allocate npu stats buffer\n");
++ npu = ERR_PTR(err);
++ goto error_module_put;
++ }
++ }
++
+ return npu;
+
+ error_module_put:
+--- a/drivers/net/ethernet/airoha/airoha_npu.h
++++ b/drivers/net/ethernet/airoha/airoha_npu.h
+@@ -17,6 +17,8 @@ struct airoha_npu {
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
++ struct airoha_foe_stats __iomem *stats;
++
+ struct {
+ int (*ppe_init)(struct airoha_npu *npu);
+ int (*ppe_deinit)(struct airoha_npu *npu);
+@@ -30,5 +32,5 @@ struct airoha_npu {
+ } ops;
+ };
+
+-struct airoha_npu *airoha_npu_get(struct device *dev);
++struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
+ void airoha_npu_put(struct airoha_npu *npu);
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -102,7 +102,7 @@ static void airoha_ppe_hw_init(struct ai
+
+ if (airoha_ppe2_is_enabled(eth)) {
+ sram_num_entries =
+- PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES);
++ PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_DATA_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+@@ -119,7 +119,7 @@ static void airoha_ppe_hw_init(struct ai
+ dram_num_entries));
+ } else {
+ sram_num_entries =
+- PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES);
++ PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_DATA_ENTRIES);
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(0),
+ PPE_SRAM_TB_NUM_ENTRY_MASK |
+ PPE_DRAM_TB_NUM_ENTRY_MASK,
+@@ -417,6 +417,77 @@ static u32 airoha_ppe_foe_get_entry_hash
+ return hash;
+ }
+
++static u32 airoha_ppe_foe_get_flow_stats_index(struct airoha_ppe *ppe, u32 hash)
++{
++ if (!airoha_ppe2_is_enabled(ppe->eth))
++ return hash;
++
++ return hash >= PPE_STATS_NUM_ENTRIES ? hash - PPE1_STATS_NUM_ENTRIES
++ : hash;
++}
++
++static void airoha_ppe_foe_flow_stat_entry_reset(struct airoha_ppe *ppe,
++ struct airoha_npu *npu,
++ int index)
++{
++ memset_io(&npu->stats[index], 0, sizeof(*npu->stats));
++ memset(&ppe->foe_stats[index], 0, sizeof(*ppe->foe_stats));
++}
++
++static void airoha_ppe_foe_flow_stats_reset(struct airoha_ppe *ppe,
++ struct airoha_npu *npu)
++{
++ int i;
++
++ for (i = 0; i < PPE_STATS_NUM_ENTRIES; i++)
++ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, i);
++}
++
++static void airoha_ppe_foe_flow_stats_update(struct airoha_ppe *ppe,
++ struct airoha_npu *npu,
++ struct airoha_foe_entry *hwe,
++ u32 hash)
++{
++ int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1);
++ u32 index, pse_port, val, *data, *ib2, *meter;
++ u8 nbq;
++
++ index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
++ if (index >= PPE_STATS_NUM_ENTRIES)
++ return;
++
++ if (type == PPE_PKT_TYPE_BRIDGE) {
++ data = &hwe->bridge.data;
++ ib2 = &hwe->bridge.ib2;
++ meter = &hwe->bridge.l2.meter;
++ } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
++ data = &hwe->ipv6.data;
++ ib2 = &hwe->ipv6.ib2;
++ meter = &hwe->ipv6.meter;
++ } else {
++ data = &hwe->ipv4.data;
++ ib2 = &hwe->ipv4.ib2;
++ meter = &hwe->ipv4.l2.meter;
++ }
++
++ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
++
++ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
++ *data = (*data & ~AIROHA_FOE_ACTDP) |
++ FIELD_PREP(AIROHA_FOE_ACTDP, val);
++
++ val = *ib2 & (AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
++ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
++ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
++
++ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
++ nbq = pse_port == 1 ? 6 : 5;
++ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
++ AIROHA_FOE_IB2_PSE_QOS);
++ *ib2 |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, 6) |
++ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
++}
++
+ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash)
+ {
+@@ -470,6 +541,8 @@ static int airoha_ppe_foe_commit_entry(s
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+ struct airoha_eth *eth = ppe->eth;
++ struct airoha_npu *npu;
++ int err = 0;
+
+ memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1));
+ wmb();
+@@ -478,25 +551,28 @@ static int airoha_ppe_foe_commit_entry(s
+ e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts);
+ hwe->ib1 = e->ib1;
+
++ rcu_read_lock();
++
++ npu = rcu_dereference(eth->npu);
++ if (!npu) {
++ err = -ENODEV;
++ goto unlock;
++ }
++
++ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
++
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+ bool ppe2 = airoha_ppe2_is_enabled(eth) &&
+ hash >= PPE1_SRAM_NUM_ENTRIES;
+- struct airoha_npu *npu;
+- int err = -ENODEV;
+-
+- rcu_read_lock();
+- npu = rcu_dereference(eth->npu);
+- if (npu)
+- err = npu->ops.ppe_foe_commit_entry(npu, addr,
+- sizeof(*hwe), hash,
+- ppe2);
+- rcu_read_unlock();
+
+- return err;
++ err = npu->ops.ppe_foe_commit_entry(npu, addr, sizeof(*hwe),
++ hash, ppe2);
+ }
++unlock:
++ rcu_read_unlock();
+
+- return 0;
++ return err;
+ }
+
+ static void airoha_ppe_foe_remove_flow(struct airoha_ppe *ppe,
+@@ -582,6 +658,7 @@ airoha_ppe_foe_commit_subflow_entry(stru
+ l2->common.etype = ETH_P_IPV6;
+
+ hwe.bridge.ib2 = e->data.bridge.ib2;
++ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
+
+ return 0;
+@@ -681,6 +758,98 @@ static int airoha_ppe_foe_flow_commit_en
+ return 0;
+ }
+
++static int airoha_ppe_get_entry_idle_time(struct airoha_ppe *ppe, u32 ib1)
++{
++ u32 state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
++ u32 ts, ts_mask, now = airoha_ppe_get_timestamp(ppe);
++ int idle;
++
++ if (state == AIROHA_FOE_STATE_BIND) {
++ ts = FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, ib1);
++ ts_mask = AIROHA_FOE_IB1_BIND_TIMESTAMP;
++ } else {
++ ts = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, ib1);
++ now = FIELD_GET(AIROHA_FOE_IB1_UNBIND_TIMESTAMP, now);
++ ts_mask = AIROHA_FOE_IB1_UNBIND_TIMESTAMP;
++ }
++ idle = now - ts;
++
++ return idle < 0 ? idle + ts_mask + 1 : idle;
++}
++
++static void
++airoha_ppe_foe_flow_l2_entry_update(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ int min_idle = airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
++ struct airoha_flow_table_entry *iter;
++ struct hlist_node *n;
++
++ lockdep_assert_held(&ppe_lock);
++
++ hlist_for_each_entry_safe(iter, n, &e->l2_flows, l2_subflow_node) {
++ struct airoha_foe_entry *hwe;
++ u32 ib1, state;
++ int idle;
++
++ hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
++ ib1 = READ_ONCE(hwe->ib1);
++
++ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
++ if (state != AIROHA_FOE_STATE_BIND) {
++ iter->hash = 0xffff;
++ airoha_ppe_foe_remove_flow(ppe, iter);
++ continue;
++ }
++
++ idle = airoha_ppe_get_entry_idle_time(ppe, ib1);
++ if (idle >= min_idle)
++ continue;
++
++ min_idle = idle;
++ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP;
++ e->data.ib1 |= ib1 & AIROHA_FOE_IB1_BIND_TIMESTAMP;
++ }
++}
++
++static void airoha_ppe_foe_flow_entry_update(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ struct airoha_foe_entry *hwe_p, hwe = {};
++
++ spin_lock_bh(&ppe_lock);
++
++ if (e->type == FLOW_TYPE_L2) {
++ airoha_ppe_foe_flow_l2_entry_update(ppe, e);
++ goto unlock;
++ }
++
++ if (e->hash == 0xffff)
++ goto unlock;
++
++ hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
++ if (!hwe_p)
++ goto unlock;
++
++ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
++ if (!airoha_ppe_foe_compare_entry(e, &hwe)) {
++ e->hash = 0xffff;
++ goto unlock;
++ }
++
++ e->data.ib1 = hwe.ib1;
++unlock:
++ spin_unlock_bh(&ppe_lock);
++}
++
++static int airoha_ppe_entry_idle_time(struct airoha_ppe *ppe,
++ struct airoha_flow_table_entry *e)
++{
++ airoha_ppe_foe_flow_entry_update(ppe, e);
++
++ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
++}
++
+ static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+ {
+@@ -896,6 +1065,60 @@ static int airoha_ppe_flow_offload_destr
+ return 0;
+ }
+
++void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
++ struct airoha_foe_stats64 *stats)
++{
++ u32 index = airoha_ppe_foe_get_flow_stats_index(ppe, hash);
++ struct airoha_eth *eth = ppe->eth;
++ struct airoha_npu *npu;
++
++ if (index >= PPE_STATS_NUM_ENTRIES)
++ return;
++
++ rcu_read_lock();
++
++ npu = rcu_dereference(eth->npu);
++ if (npu) {
++ u64 packets = ppe->foe_stats[index].packets;
++ u64 bytes = ppe->foe_stats[index].bytes;
++ struct airoha_foe_stats npu_stats;
++
++ memcpy_fromio(&npu_stats, &npu->stats[index],
++ sizeof(*npu->stats));
++ stats->packets = packets << 32 | npu_stats.packets;
++ stats->bytes = bytes << 32 | npu_stats.bytes;
++ }
++
++ rcu_read_unlock();
++}
++
++static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
++ struct flow_cls_offload *f)
++{
++ struct airoha_eth *eth = port->qdma->eth;
++ struct airoha_flow_table_entry *e;
++ u32 idle;
++
++	e = rhashtable_lookup(&eth->flow_table, &f->cookie,
++ airoha_flow_table_params);
++ if (!e)
++ return -ENOENT;
++
++ idle = airoha_ppe_entry_idle_time(eth->ppe, e);
++ f->stats.lastused = jiffies - idle * HZ;
++
++ if (e->hash != 0xffff) {
++ struct airoha_foe_stats64 stats = {};
++
++ airoha_ppe_foe_entry_get_stats(eth->ppe, e->hash, &stats);
++ f->stats.pkts += (stats.packets - e->stats.packets);
++ f->stats.bytes += (stats.bytes - e->stats.bytes);
++ e->stats = stats;
++ }
++
++ return 0;
++}
++
+ static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
+ struct flow_cls_offload *f)
+ {
+@@ -904,6 +1127,8 @@ static int airoha_ppe_flow_offload_cmd(s
+ return airoha_ppe_flow_offload_replace(port, f);
+ case FLOW_CLS_DESTROY:
+ return airoha_ppe_flow_offload_destroy(port, f);
++ case FLOW_CLS_STATS:
++ return airoha_ppe_flow_offload_stats(port, f);
+ default:
+ break;
+ }
+@@ -929,11 +1154,12 @@ static int airoha_ppe_flush_sram_entries
+
+ static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth)
+ {
+- struct airoha_npu *npu = airoha_npu_get(eth->dev);
++ struct airoha_npu *npu = airoha_npu_get(eth->dev,
++						&eth->ppe->foe_stats_dma);
+
+ if (IS_ERR(npu)) {
+ request_module("airoha-npu");
+- npu = airoha_npu_get(eth->dev);
++		npu = airoha_npu_get(eth->dev, &eth->ppe->foe_stats_dma);
+ }
+
+ return npu;
+@@ -956,6 +1182,8 @@ static int airoha_ppe_offload_setup(stru
+ if (err)
+ goto error_npu_put;
+
++ airoha_ppe_foe_flow_stats_reset(eth->ppe, npu);
++
+ rcu_assign_pointer(eth->npu, npu);
+ synchronize_rcu();
+
+@@ -1027,6 +1255,15 @@ int airoha_ppe_init(struct airoha_eth *e
+ if (!ppe->foe_flow)
+ return -ENOMEM;
+
++ foe_size = PPE_STATS_NUM_ENTRIES * sizeof(*ppe->foe_stats);
++ if (foe_size) {
++ ppe->foe_stats = dmam_alloc_coherent(eth->dev, foe_size,
++ &ppe->foe_stats_dma,
++ GFP_KERNEL);
++ if (!ppe->foe_stats)
++ return -ENOMEM;
++ }
++
+ 	err = rhashtable_init(&eth->flow_table, &airoha_flow_table_params);
+ if (err)
+ return err;
+--- a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c
+@@ -61,6 +61,7 @@ static int airoha_ppe_debugfs_foe_show(s
+ u16 *src_port = NULL, *dest_port = NULL;
+ struct airoha_foe_mac_info_common *l2;
+ unsigned char h_source[ETH_ALEN] = {};
++ struct airoha_foe_stats64 stats = {};
+ unsigned char h_dest[ETH_ALEN];
+ struct airoha_foe_entry *hwe;
+ u32 type, state, ib2, data;
+@@ -144,14 +145,18 @@ static int airoha_ppe_debugfs_foe_show(s
+ cpu_to_be16(hwe->ipv4.l2.src_mac_lo);
+ }
+
++ airoha_ppe_foe_entry_get_stats(ppe, i, &stats);
++
+ *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo);
+ *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x"
+- " vlan=%d,%d ib1=%08x ib2=%08x\n",
++ " vlan=%d,%d ib1=%08x ib2=%08x"
++ " packets=%llu bytes=%llu\n",
+ h_source, h_dest, l2->etype, data,
+- l2->vlan1, l2->vlan2, hwe->ib1, ib2);
++ l2->vlan1, l2->vlan2, hwe->ib1, ib2,
++ stats.packets, stats.bytes);
+ }
+
+ return 0;
--- /dev/null
+From a98326c151ea3d92e9496858cc2dacccd0870941 Mon Sep 17 00:00:00 2001
+Date: Fri, 16 May 2025 10:00:01 +0200
+Subject: [PATCH 3/3] net: airoha: ppe: Disable packet keepalive
+
+Since netfilter flowtable entries are now refreshed by flow-stats
+polling, we can disable the hw packet keepalive that was used to
+periodically send packets belonging to offloaded flows to the kernel
+in order to refresh the flowtable entries.
+
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -84,6 +84,7 @@ static void airoha_ppe_hw_init(struct ai
+
+ airoha_fe_rmw(eth, REG_PPE_TB_CFG(i),
+ PPE_TB_CFG_SEARCH_MISS_MASK |
++ PPE_TB_CFG_KEEPALIVE_MASK |
+ PPE_TB_ENTRY_SIZE_MASK,
+ FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
+ FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0));
--- /dev/null
+From 09aa788f98da3e2f41ce158cc691d6d52e808bc9 Mon Sep 17 00:00:00 2001
+Date: Wed, 21 May 2025 09:16:37 +0200
+Subject: [PATCH 1/3] net: airoha: Do not store hfwd references in airoha_qdma
+ struct
+
+Since the hfwd descriptor and buffer queues are allocated via
+dmam_alloc_coherent(), we do not need to store their references
+in the airoha_qdma struct. This patch does not introduce any logical
+change, just code clean-up.
+
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 8 ++------
+ drivers/net/ethernet/airoha/airoha_eth.h | 6 ------
+ 2 files changed, 2 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -1078,17 +1078,13 @@ static int airoha_qdma_init_hfwd_queues(
+ int size;
+
+ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+- qdma->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+- GFP_KERNEL);
+- if (!qdma->hfwd.desc)
++ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+- qdma->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+- GFP_KERNEL);
+- if (!qdma->hfwd.q)
++ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -513,12 +513,6 @@ struct airoha_qdma {
+
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+-
+- /* descriptor and packet buffers for qdma hw forward */
+- struct {
+- void *desc;
+- void *q;
+- } hfwd;
+ };
+
+ struct airoha_gdm_port {
--- /dev/null
+From 3a1ce9e3d01bbf3912c3e3f81cb554d558eb715b Mon Sep 17 00:00:00 2001
+Date: Wed, 21 May 2025 09:16:38 +0200
+Subject: [PATCH 2/3] net: airoha: Add the capability to allocate hwfd buffers
+ via reserved-memory
+
+In some configurations the QDMA blocks require a contiguous block of
+system memory for the hwfd buffer queue. Introduce the capability to
+allocate the hw forwarding buffer queue via the reserved-memory DTS
+property instead of using dmam_alloc_coherent().
+
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 33 +++++++++++++++++++++---
+ 1 file changed, 30 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -5,6 +5,7 @@
+ */
+ #include <linux/of.h>
+ #include <linux/of_net.h>
++#include <linux/of_reserved_mem.h>
+ #include <linux/platform_device.h>
+ #include <linux/tcp.h>
+ #include <linux/u64_stats_sync.h>
+@@ -1073,9 +1074,11 @@ static void airoha_qdma_cleanup_tx_queue
+ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
+ {
+ struct airoha_eth *eth = qdma->eth;
++	int id = qdma - &eth->qdma[0];
+ dma_addr_t dma_addr;
++ const char *name;
++ int size, index;
+ u32 status;
+- int size;
+
+ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+@@ -1083,10 +1086,34 @@ static int airoha_qdma_init_hfwd_queues(
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+
+- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+- if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
++ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
++ if (!name)
+ return -ENOMEM;
+
++ index = of_property_match_string(eth->dev->of_node,
++ "memory-region-names", name);
++ if (index >= 0) {
++ struct reserved_mem *rmem;
++ struct device_node *np;
++
++ /* Consume reserved memory for hw forwarding buffers queue if
++ * available in the DTS
++ */
++ np = of_parse_phandle(eth->dev->of_node, "memory-region",
++ index);
++ if (!np)
++ return -ENODEV;
++
++ rmem = of_reserved_mem_lookup(np);
++ of_node_put(np);
++ dma_addr = rmem->base;
++ } else {
++ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
++ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
++ GFP_KERNEL))
++ return -ENOMEM;
++ }
++
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
--- /dev/null
+From c683e378c0907e66cee939145edf936c254ff1e3 Mon Sep 17 00:00:00 2001
+Date: Wed, 21 May 2025 09:16:39 +0200
+Subject: [PATCH 3/3] net: airoha: Add the capability to allocate hfwd
+ descriptors in SRAM
+
+In order to improve packet processing and packet forwarding
+performance, the EN7581 SoC supports consuming SRAM instead of DRAM
+for the hw forwarding descriptor queue.
+For downlink hw accelerated traffic, request SRAM memory for the hw
+forwarding descriptor queue.
+
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 11 +----------
+ drivers/net/ethernet/airoha/airoha_eth.h | 9 +++++++++
+ drivers/net/ethernet/airoha/airoha_ppe.c | 6 ++++++
+ 3 files changed, 16 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -71,15 +71,6 @@ static void airoha_qdma_irq_disable(stru
+ airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
+ }
+
+-static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
+-{
+- /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
+- * GDM{2,3,4} can be used as wan port connected to an external
+- * phy module.
+- */
+- return port->id == 1;
+-}
+-
+ static void airoha_set_macaddr(struct airoha_gdm_port *port, const u8 *addr)
+ {
+ struct airoha_eth *eth = port->qdma->eth;
+@@ -1125,7 +1116,7 @@ static int airoha_qdma_init_hfwd_queues(
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+- LMGR_INIT_START);
++ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
+
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -597,6 +597,15 @@ u32 airoha_rmw(void __iomem *base, u32 o
+ #define airoha_qdma_clear(qdma, offset, val) \
+ airoha_rmw((qdma)->regs, (offset), (val), 0)
+
++static inline bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
++{
++ /* GDM1 port on EN7581 SoC is connected to the lan dsa switch.
++ * GDM{2,3,4} can be used as wan port connected to an external
++ * phy module.
++ */
++ return port->id == 1;
++}
++
+ bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -251,6 +251,12 @@ static int airoha_ppe_foe_entry_prepare(
+ else
+ pse_port = 2; /* uplink relies on GDM2 loopback */
+ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
++
++ /* For downlink traffic consume SRAM memory for hw forwarding
++ * descriptors queue.
++ */
++ if (airhoa_is_lan_gdm_port(port))
++ val |= AIROHA_FOE_IB2_FAST_PATH;
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
--- /dev/null
+From c59783780c8ad66f6076a9a7c74df3e006e29519 Mon Sep 17 00:00:00 2001
+Date: Sat, 24 May 2025 09:29:11 +0200
+Subject: [PATCH] net: airoha: Fix an error handling path in
+ airoha_alloc_gdm_port()
+
+If register_netdev() fails, the error handling path of the probe will not
+free the memory allocated by the previous airoha_metadata_dst_alloc() call
+because port->dev->reg_state will not be NETREG_REGISTERED.
+
+So, an explicit airoha_metadata_dst_free() call is needed in this case to
+avoid a memory leak.
+
+Fixes: af3cf757d5c9 ("net: airoha: Move DSA tag in DMA descriptor")
+Link: https://patch.msgid.link/1b94b91345017429ed653e2f05d25620dc2823f9.1746715755.git.christophe.jaillet@wanadoo.fr
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -2881,7 +2881,15 @@ static int airoha_alloc_gdm_port(struct
+ if (err)
+ return err;
+
+- return register_netdev(dev);
++ err = register_netdev(dev);
++ if (err)
++ goto free_metadata_dst;
++
++ return 0;
++
++free_metadata_dst:
++ airoha_metadata_dst_free(port);
++ return err;
+ }
+
+ static int airoha_probe(struct platform_device *pdev)
--- /dev/null
+From a869d3a5eb011a9cf9bd864f31f5cf27362de8c7 Mon Sep 17 00:00:00 2001
+Date: Mon, 2 Jun 2025 12:55:37 +0200
+Subject: [PATCH 1/3] net: airoha: Initialize PPE UPDMEM source-mac table
+
+The UPDMEM source-mac table is a key-value map that stores device mac
+addresses indexed by port identifier. It is used during IPv6 traffic hw
+acceleration since PPE entries, for space constraints, do not contain
+the full source mac address but just its identifier in the UPDMEM
+source-mac table.
+Configure the UPDMEM source-mac table with the device mac addresses and
+set the source-mac ID field for PPE IPv6 entries in order to select the
+proper device mac address as source mac for L3 IPv6 hw accelerated
+traffic.
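+
+As a worked example (illustrative address, not from this patch), the
+address 00:11:22:33:44:55 is programmed as two UPDMEM data words,
+matching the write sequence in airoha_ppe_init_upd_mem() below:
+
+	/* offset 0: low four bytes of the mac address -> 0x22334455 */
+	u32 w0 = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55;
+	/* offset 1: high two bytes of the mac address -> 0x0011 */
+	u32 w1 = (0x00 << 8) | 0x11;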
+
+Fixes: 00a7678310fe ("net: airoha: Introduce flowtable offload support")
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 2 ++
+ drivers/net/ethernet/airoha/airoha_eth.h | 1 +
+ drivers/net/ethernet/airoha/airoha_ppe.c | 26 ++++++++++++++++++++++-
+ drivers/net/ethernet/airoha/airoha_regs.h | 10 +++++++++
+ 4 files changed, 38 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -84,6 +84,8 @@ static void airoha_set_macaddr(struct ai
+ val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_FE_MAC_LMIN(reg), val);
+ airoha_fe_wr(eth, REG_FE_MAC_LMAX(reg), val);
++
++ airoha_ppe_init_upd_mem(port);
+ }
+
+ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -614,6 +614,7 @@ void airoha_ppe_check_skb(struct airoha_
+ int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
+ int airoha_ppe_init(struct airoha_eth *eth);
+ void airoha_ppe_deinit(struct airoha_eth *eth);
++void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+ struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+ u32 hash);
+ void airoha_ppe_foe_entry_get_stats(struct airoha_ppe *ppe, u32 hash,
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -223,6 +223,7 @@ static int airoha_ppe_foe_entry_prepare(
+ int dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+ u32 qdata, ports_pad, val;
++ u8 smac_id = 0xf;
+
+ memset(hwe, 0, sizeof(*hwe));
+
+@@ -257,6 +258,8 @@ static int airoha_ppe_foe_entry_prepare(
+ */
+ if (airhoa_is_lan_gdm_port(port))
+ val |= AIROHA_FOE_IB2_FAST_PATH;
++
++ smac_id = port->id;
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+@@ -291,7 +294,7 @@ static int airoha_ppe_foe_entry_prepare(
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
+ } else {
+- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf);
++ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
+ }
+
+ if (data->vlan.num) {
+@@ -1238,6 +1241,27 @@ void airoha_ppe_check_skb(struct airoha_
+ airoha_ppe_foe_insert_entry(ppe, skb, hash);
+ }
+
++void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
++{
++ struct airoha_eth *eth = port->qdma->eth;
++ struct net_device *dev = port->dev;
++ const u8 *addr = dev->dev_addr;
++ u32 val;
++
++ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
++ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
++ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
++ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
++ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
++
++ val = (addr[0] << 8) | addr[1];
++ airoha_fe_wr(eth, REG_UPDMEM_DATA(0), val);
++ airoha_fe_wr(eth, REG_UPDMEM_CTRL(0),
++ FIELD_PREP(PPE_UPDMEM_ADDR_MASK, port->id) |
++ FIELD_PREP(PPE_UPDMEM_OFFSET_MASK, 1) |
++ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
++}
++
+ int airoha_ppe_init(struct airoha_eth *eth)
+ {
+ struct airoha_ppe *ppe;
+--- a/drivers/net/ethernet/airoha/airoha_regs.h
++++ b/drivers/net/ethernet/airoha/airoha_regs.h
+@@ -313,6 +313,16 @@
+ #define REG_PPE_RAM_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x320)
+ #define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2))
+
++#define REG_UPDMEM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x370)
++#define PPE_UPDMEM_ACK_MASK BIT(31)
++#define PPE_UPDMEM_ADDR_MASK GENMASK(11, 8)
++#define PPE_UPDMEM_OFFSET_MASK GENMASK(7, 4)
++#define PPE_UPDMEM_SEL_MASK GENMASK(3, 2)
++#define PPE_UPDMEM_WR_MASK BIT(1)
++#define PPE_UPDMEM_REQ_MASK BIT(0)
++
++#define REG_UPDMEM_DATA(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x374)
++
+ #define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+ #define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+ #define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
--- /dev/null
+From 504a577c9b000f9e0e99e1b28616fb4eb369e1ef Mon Sep 17 00:00:00 2001
+Date: Mon, 2 Jun 2025 12:55:38 +0200
+Subject: [PATCH 2/3] net: airoha: Fix IPv6 hw acceleration in bridge mode
+
+ib2 and airoha_foe_mac_info_common do not have the same offsets in the
+airoha_foe_bridge and airoha_foe_ipv6 structures. The current codebase
+does not accelerate IPv6 traffic in bridge mode since ib2 and the l2
+info are not set properly when copying the airoha_foe_bridge struct into
+the airoha_foe_ipv6 one in the airoha_ppe_foe_commit_subflow_entry
+routine.
+Fix IPv6 hw acceleration in bridge mode by resolving the ib2 and
+airoha_foe_mac_info_common overwrite in
+airoha_ppe_foe_commit_subflow_entry() and configuring them with proper
+values.
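+
+The mismatch can be made concrete with a hypothetical compile-time
+assertion (not part of this patch):
+
+	/* ib2 sits at different offsets in the two layouts, so a plain
+	 * struct copy scrambles it.
+	 */
+	BUILD_BUG_ON(offsetof(struct airoha_foe_bridge, ib2) ==
+		     offsetof(struct airoha_foe_ipv6, ib2));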
+
+Fixes: cd53f622611f ("net: airoha: Add L2 hw acceleration support")
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -639,7 +639,6 @@ airoha_ppe_foe_commit_subflow_entry(stru
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+ struct airoha_flow_table_entry *f;
+- struct airoha_foe_mac_info *l2;
+ int type;
+
+ hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
+@@ -656,18 +655,20 @@ airoha_ppe_foe_commit_subflow_entry(stru
+
+ memcpy(&hwe, hwe_p, sizeof(*hwe_p));
+ hwe.ib1 = (hwe.ib1 & mask) | (e->data.ib1 & ~mask);
+- l2 = &hwe.bridge.l2;
+- memcpy(l2, &e->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe.ib1);
+- if (type == PPE_PKT_TYPE_IPV4_HNAPT)
+- memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
+- sizeof(hwe.ipv4.new_tuple));
+- else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T &&
+- l2->common.etype == ETH_P_IP)
+- l2->common.etype = ETH_P_IPV6;
++ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
++ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
++ hwe.ipv6.ib2 = e->data.bridge.ib2;
++ } else {
++ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
++ sizeof(hwe.bridge.l2));
++ hwe.bridge.ib2 = e->data.bridge.ib2;
++ if (type == PPE_PKT_TYPE_IPV4_HNAPT)
++ memcpy(&hwe.ipv4.new_tuple, &hwe.ipv4.orig_tuple,
++ sizeof(hwe.ipv4.new_tuple));
++ }
+
+- hwe.bridge.ib2 = e->data.bridge.ib2;
+ hwe.bridge.data = e->data.bridge.data;
+ airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
+
--- /dev/null
+From c86fac5365d3a068422beeb508f2741f1a2d734d Mon Sep 17 00:00:00 2001
+Date: Mon, 2 Jun 2025 12:55:39 +0200
+Subject: [PATCH 3/3] net: airoha: Fix smac_id configuration in bridge mode
+
+Set the PPE entry smac_id field to 0xf in the
+airoha_ppe_foe_commit_subflow_entry routine for IPv6 traffic in order
+to instruct the hw to keep the original source mac address for IPv6
+hw-accelerated traffic in bridge mode.
+
+Fixes: cd53f622611f ("net: airoha: Add L2 hw acceleration support")
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -660,6 +660,11 @@ airoha_ppe_foe_commit_subflow_entry(stru
+ if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+ memcpy(&hwe.ipv6.l2, &e->data.bridge.l2, sizeof(hwe.ipv6.l2));
+ hwe.ipv6.ib2 = e->data.bridge.ib2;
++ /* setting smac_id to 0xf instructs the hw to keep the original
++ * source mac address
++ */
++ hwe.ipv6.l2.src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID,
++ 0xf);
+ } else {
+ memcpy(&hwe.bridge.l2, &e->data.bridge.l2,
+ sizeof(hwe.bridge.l2));
--- /dev/null
+From 0097c4195b1d0ca57d15979626c769c74747b5a0 Mon Sep 17 00:00:00 2001
+Date: Mon, 9 Jun 2025 22:28:40 +0200
+Subject: [PATCH] net: airoha: Add PPPoE offload support
+
+Introduce flowtable hw acceleration for PPPoE traffic.
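+
+With this change the l2 etype selection logic becomes, condensed from
+the diff below:
+
+ if (dsa_port >= 0)
+         l2->etype = BIT(dsa_port) | (!data->vlan.num ? BIT(15) : 0);
+ else if (data->pppoe.num)
+         l2->etype = ETH_P_PPP_SES;
+ else
+         l2->etype = type >= PPE_PKT_TYPE_IPV6_ROUTE_3T ? ETH_P_IPV6
+                                                        : ETH_P_IP;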
+
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 31 ++++++++++++++++++------
+ 1 file changed, 23 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -232,6 +232,7 @@ static int airoha_ppe_foe_entry_prepare(
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) |
+ FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) |
++ FIELD_PREP(AIROHA_FOE_IB1_BIND_PPPOE, data->pppoe.num) |
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+@@ -281,33 +282,42 @@ static int airoha_ppe_foe_entry_prepare(
+ hwe->ipv6.data = qdata;
+ hwe->ipv6.ib2 = val;
+ l2 = &hwe->ipv6.l2;
++ l2->etype = ETH_P_IPV6;
+ } else {
+ hwe->ipv4.data = qdata;
+ hwe->ipv4.ib2 = val;
+ l2 = &hwe->ipv4.l2.common;
++ l2->etype = ETH_P_IP;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest);
+ l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4);
+ if (type <= PPE_PKT_TYPE_IPV4_DSLITE) {
++ struct airoha_foe_mac_info *mac_info;
++
+ l2->src_mac_hi = get_unaligned_be32(data->eth.h_source);
+ hwe->ipv4.l2.src_mac_lo =
+ get_unaligned_be16(data->eth.h_source + 4);
++
++ mac_info = (struct airoha_foe_mac_info *)l2;
++ mac_info->pppoe_id = data->pppoe.sid;
+ } else {
+- l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id);
++ l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, smac_id) |
++ FIELD_PREP(AIROHA_FOE_MAC_PPPOE_ID,
++ data->pppoe.sid);
+ }
+
+ if (data->vlan.num) {
+- l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0;
+ l2->vlan1 = data->vlan.hdr[0].id;
+ if (data->vlan.num == 2)
+ l2->vlan2 = data->vlan.hdr[1].id;
+- } else if (dsa_port >= 0) {
+- l2->etype = BIT(15) | BIT(dsa_port);
+- } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) {
+- l2->etype = ETH_P_IPV6;
+- } else {
+- l2->etype = ETH_P_IP;
++ }
++
++ if (dsa_port >= 0) {
++ l2->etype = BIT(dsa_port);
++ l2->etype |= !data->vlan.num ? BIT(15) : 0;
++ } else if (data->pppoe.num) {
++ l2->etype = ETH_P_PPP_SES;
+ }
+
+ return 0;
+@@ -957,6 +967,11 @@ static int airoha_ppe_flow_offload_repla
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
++ if (data.pppoe.num == 1 || data.vlan.num == 2)
++ return -EOPNOTSUPP;
++
++ data.pppoe.sid = act->pppoe.sid;
++ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
--- /dev/null
+From f478d68b653323b691280b40fbd3b8ca1ac75aa2 Mon Sep 17 00:00:00 2001
+Date: Mon, 9 Jun 2025 22:40:35 +0200
+Subject: [PATCH] net: airoha: Enable RX queues 16-31
+
+Fix RX_DONE_INT_MASK definition in order to enable RX queues 16-31.
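+
+With the fix, assuming RX_DONE_HIGH_INT_MASK and RX_DONE_LOW_INT_MASK
+each describe bits [15:0] of their own status register, the high half
+is shifted into the upper 16 bits of the combined mask instead of
+overlapping the low half:
+
+ /* old: high and low halves overlap, queues 16-31 are lost */
+ RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK
+
+ /* new: fls(RX_DONE_HIGH_INT_MASK) == 16, so queues 16-31 land in
+  * bits [31:16] of the combined mask
+  */
+ (RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK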
+
+Fixes: f252493e18353 ("net: airoha: Enable multiple IRQ lines support in airoha_eth driver.")
+---
+ drivers/net/ethernet/airoha/airoha_regs.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_regs.h
++++ b/drivers/net/ethernet/airoha/airoha_regs.h
+@@ -614,8 +614,9 @@
+ RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
+ RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
+
+-#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
+ #define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
++#define RX_DONE_INT_MASK \
++ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
+
+ #define INT_RX2_MASK(_n) \
+ ((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
--- /dev/null
+From 78bd03ee1f20a267d2c218884b66041b3508ac9c Mon Sep 17 00:00:00 2001
+Date: Wed, 18 Jun 2025 09:37:40 +0200
+Subject: [PATCH] net: airoha: Always check return value from
+ airoha_ppe_foe_get_entry()
+
+The airoha_ppe_foe_get_entry routine can return NULL, so check that the
+returned pointer is not NULL in airoha_ppe_foe_flow_l2_entry_update().
+
+Fixes: b81e0f2b58be3 ("net: airoha: Add FLOW_CLS_STATS callback support")
+Link: https://patch.msgid.link/20250618-check-ret-from-airoha_ppe_foe_get_entry-v2-1-068dcea3cc66@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -819,8 +819,10 @@ airoha_ppe_foe_flow_l2_entry_update(stru
+ int idle;
+
+ hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
+- ib1 = READ_ONCE(hwe->ib1);
++ if (!hwe)
++ continue;
+
++ ib1 = READ_ONCE(hwe->ib1);
+ state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, ib1);
+ if (state != AIROHA_FOE_STATE_BIND) {
+ iter->hash = 0xffff;
--- /dev/null
+From edf8afeecfbb0b8c1a2edb8c8892d2f759d35321 Mon Sep 17 00:00:00 2001
+Date: Thu, 19 Jun 2025 09:07:24 +0200
+Subject: [PATCH 1/2] net: airoha: Compute number of descriptors according to
+ reserved memory size
+
+In order to not exceed the reserved memory size for hwfd buffers,
+compute the number of hwfd buffers/descriptors according to the
+reserved memory size and the size of each hwfd buffer (2KB).
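+
+For example, assuming a 4MB reserved region and 2KB
+(AIROHA_MAX_PACKET_SIZE) buffers:
+
+ num_desc = rmem->size / AIROHA_MAX_PACKET_SIZE;
+ /* 0x400000 / 0x800 = 2048 descriptors */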
+
+Fixes: 3a1ce9e3d01b ("net: airoha: Add the capability to allocate hwfd buffers via reserved-memory")
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -1066,19 +1066,13 @@ static void airoha_qdma_cleanup_tx_queue
+
+ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
+ {
++ int size, index, num_desc = HW_DSCP_NUM;
+ struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
+ dma_addr_t dma_addr;
+ const char *name;
+- int size, index;
+ u32 status;
+
+- size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+- if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
+- return -ENOMEM;
+-
+- airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+-
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
+ return -ENOMEM;
+@@ -1100,8 +1094,12 @@ static int airoha_qdma_init_hfwd_queues(
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ dma_addr = rmem->base;
++ /* Compute the number of hw descriptors according to the
++ * reserved memory size and the payload buffer size
++ */
++ num_desc = rmem->size / AIROHA_MAX_PACKET_SIZE;
+ } else {
+- size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
++ size = AIROHA_MAX_PACKET_SIZE * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+@@ -1109,6 +1107,11 @@ static int airoha_qdma_init_hfwd_queues(
+
+ airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);
+
++ size = num_desc * sizeof(struct airoha_qdma_fwd_desc);
++ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr, GFP_KERNEL))
++ return -ENOMEM;
++
++ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+@@ -1117,7 +1120,7 @@ static int airoha_qdma_init_hfwd_queues(
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+- FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
++ FIELD_PREP(HW_FWD_DESC_NUM_MASK, num_desc) |
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK);
+
+ return read_poll_timeout(airoha_qdma_rr, status,
--- /dev/null
+From 7b46bdaec00a675f6fac9d0b01a2105b5746ebe9 Mon Sep 17 00:00:00 2001
+Date: Thu, 19 Jun 2025 09:07:25 +0200
+Subject: [PATCH 2/2] net: airoha: Differentiate hwfd buffer size for QDMA0 and
+ QDMA1
+
+EN7581 SoC allows configuring the size and the number of buffers in the
+hwfd payload queue for both QDMA0 and QDMA1.
+In order to reduce the DRAM required for the hwfd buffer queues and
+decrease the memory footprint, differentiate the hwfd buffer size for
+QDMA0 and QDMA1, reducing it to 1KB for QDMA1 (WAN) while maintaining
+2KB for QDMA0 (LAN).
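+
+The selection boils down to (condensed from the diff below):
+
+ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2  /* QDMA1: 1KB */
+               : AIROHA_MAX_PACKET_SIZE;     /* QDMA0: 2KB */
+ /* HW_FWD_DSCP_PAYLOAD_SIZE_MASK: 0 selects 2KB, 1 selects 1KB */
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id);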
+
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -1069,14 +1069,15 @@ static int airoha_qdma_init_hfwd_queues(
+ int size, index, num_desc = HW_DSCP_NUM;
+ struct airoha_eth *eth = qdma->eth;
+ int id = qdma - &eth->qdma[0];
++ u32 status, buf_size;
+ dma_addr_t dma_addr;
+ const char *name;
+- u32 status;
+
+ name = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d-buf", id);
+ if (!name)
+ return -ENOMEM;
+
++ buf_size = id ? AIROHA_MAX_PACKET_SIZE / 2 : AIROHA_MAX_PACKET_SIZE;
+ index = of_property_match_string(eth->dev->of_node,
+ "memory-region-names", name);
+ if (index >= 0) {
+@@ -1097,9 +1098,9 @@ static int airoha_qdma_init_hfwd_queues(
+ /* Compute the number of hw descriptors according to the
+ * reserved memory size and the payload buffer size
+ */
+- num_desc = rmem->size / AIROHA_MAX_PACKET_SIZE;
++ num_desc = div_u64(rmem->size, buf_size);
+ } else {
+- size = AIROHA_MAX_PACKET_SIZE * num_desc;
++ size = buf_size * num_desc;
+ if (!dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL))
+ return -ENOMEM;
+@@ -1112,9 +1113,10 @@ static int airoha_qdma_init_hfwd_queues(
+ return -ENOMEM;
+
+ airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);
++ /* QDMA0: 2KB. QDMA1: 1KB */
+ airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+- FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
++ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, !!id));
+ airoha_qdma_rmw(qdma, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
+ airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
--- /dev/null
+From 38358fa3cc8e16c6862a3e5c5c233f9f652e3a6d Mon Sep 17 00:00:00 2001
+Date: Thu, 31 Jul 2025 12:29:08 +0200
+Subject: [PATCH] net: airoha: Fix PPE table access in
+ airoha_ppe_debugfs_foe_show()
+
+In order to avoid any possible race, we need to hold the ppe_lock
+spinlock when accessing the hw PPE table. The airoha_ppe_foe_get_entry
+routine is always executed holding ppe_lock except in the
+airoha_ppe_debugfs_foe_show routine. Fix the problem by introducing the
+airoha_ppe_foe_get_entry_locked routine.
+
+Fixes: 3fe15c640f380 ("net: airoha: Introduce PPE debugfs support")
+Link: https://patch.msgid.link/20250731-airoha_ppe_foe_get_entry_locked-v2-1-50efbd8c0fd6@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 26 ++++++++++++++++++------
+ 1 file changed, 20 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -508,9 +508,11 @@ static void airoha_ppe_foe_flow_stats_up
+ FIELD_PREP(AIROHA_FOE_IB2_NBQ, nbq);
+ }
+
+-struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
+- u32 hash)
++static struct airoha_foe_entry *
++airoha_ppe_foe_get_entry_locked(struct airoha_ppe *ppe, u32 hash)
+ {
++ lockdep_assert_held(&ppe_lock);
++
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ struct airoha_eth *eth = ppe->eth;
+@@ -537,6 +539,18 @@ struct airoha_foe_entry *airoha_ppe_foe_
+ return ppe->foe + hash * sizeof(struct airoha_foe_entry);
+ }
+
++struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe,
++ u32 hash)
++{
++ struct airoha_foe_entry *hwe;
++
++ spin_lock_bh(&ppe_lock);
++ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
++ spin_unlock_bh(&ppe_lock);
++
++ return hwe;
++}
++
+ static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e,
+ struct airoha_foe_entry *hwe)
+ {
+@@ -651,7 +665,7 @@ airoha_ppe_foe_commit_subflow_entry(stru
+ struct airoha_flow_table_entry *f;
+ int type;
+
+- hwe_p = airoha_ppe_foe_get_entry(ppe, hash);
++ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe_p)
+ return -EINVAL;
+
+@@ -703,7 +717,7 @@ static void airoha_ppe_foe_insert_entry(
+
+ spin_lock_bh(&ppe_lock);
+
+- hwe = airoha_ppe_foe_get_entry(ppe, hash);
++ hwe = airoha_ppe_foe_get_entry_locked(ppe, hash);
+ if (!hwe)
+ goto unlock;
+
+@@ -818,7 +832,7 @@ airoha_ppe_foe_flow_l2_entry_update(stru
+ u32 ib1, state;
+ int idle;
+
+- hwe = airoha_ppe_foe_get_entry(ppe, iter->hash);
++ hwe = airoha_ppe_foe_get_entry_locked(ppe, iter->hash);
+ if (!hwe)
+ continue;
+
+@@ -855,7 +869,7 @@ static void airoha_ppe_foe_flow_entry_up
+ if (e->hash == 0xffff)
+ goto unlock;
+
+- hwe_p = airoha_ppe_foe_get_entry(ppe, e->hash);
++ hwe_p = airoha_ppe_foe_get_entry_locked(ppe, e->hash);
+ if (!hwe_p)
+ goto unlock;
+
--- /dev/null
+From 9f6b606b6b37e61427412708411e8e04b1a858e8 Mon Sep 17 00:00:00 2001
+Date: Mon, 18 Aug 2025 11:58:25 +0200
+Subject: [PATCH] net: airoha: ppe: Do not invalidate PPE entries in case of SW
+ hash collision
+
+The SW hash computed by the airoha_ppe_foe_get_entry_hash routine (used
+for the foe_flow hlist) can theoretically produce collisions between
+two different HW PPE entries.
+In airoha_ppe_foe_insert_entry(), if a collision occurs, we mark the
+second PPE entry in the list as stale (setting the hw hash to 0xffff).
+Stale entries are no longer updated in the
+airoha_ppe_foe_flow_entry_update routine and so they are removed by
+Netfilter.
+Fix the problem by not marking the second entry as stale in the
+airoha_ppe_foe_insert_entry routine if we have already inserted the
+brand-new entry in the PPE table, and let Netfilter remove real stale
+entries according to their timestamp.
+Please note this is just a theoretical issue spotted while reviewing
+the code, not one observed on a running system.
+
+Fixes: cd53f622611f9 ("net: airoha: Add L2 hw acceleration support")
+Link: https://patch.msgid.link/20250818-airoha-en7581-hash-collision-fix-v1-1-d190c4b53d1c@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -736,10 +736,8 @@ static void airoha_ppe_foe_insert_entry(
+ continue;
+ }
+
+- if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
+- e->hash = 0xffff;
++ if (!airoha_ppe_foe_compare_entry(e, hwe))
+ continue;
+- }
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ commit_done = true;
--- /dev/null
+From 9fb6fef0fb49124291837af1da5028f79d53f98e Mon Sep 17 00:00:00 2001
+Date: Fri, 14 Jun 2024 13:06:03 +0300
+Subject: [PATCH] resource: Add resource set range and size helpers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Setting the end address for a resource with a given size lacks a helper and
+is therefore coded manually unlike the getter side which has a helper for
+resource size calculation. Also, almost all callsites that calculate the
+end address for a resource also set the start address right before it like
+this:
+
+ res->start = start_addr;
+ res->end = res->start + size - 1;
+
+Add resource_set_range(res, start_addr, size) that sets the start address
+and calculates the end address to simplify this often repeated fragment.
+
+Also add resource_set_size() for the cases where setting the start address
+of the resource is not necessary but mention in its kerneldoc that
+resource_set_range() is preferred when setting both addresses.
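+
+With the helper, the fragment above collapses to:
+
+ resource_set_range(res, start_addr, size);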
+
+---
+ include/linux/ioport.h | 32 ++++++++++++++++++++++++++++++++
+ 1 file changed, 32 insertions(+)
+
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -216,6 +216,38 @@ struct resource *lookup_resource(struct
+ int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+ resource_size_t resource_alignment(struct resource *res);
++
++/**
++ * resource_set_size - Calculate resource end address from size and start
++ * @res: Resource descriptor
++ * @size: Size of the resource
++ *
++ * Calculate the end address for @res based on @size.
++ *
++ * Note: The start address of @res must be set when calling this function.
++ * Prefer resource_set_range() if setting both the start address and @size.
++ */
++static inline void resource_set_size(struct resource *res, resource_size_t size)
++{
++ res->end = res->start + size - 1;
++}
++
++/**
++ * resource_set_range - Set resource start and end addresses
++ * @res: Resource descriptor
++ * @start: Start address for the resource
++ * @size: Size of the resource
++ *
++ * Set @res start address and calculate the end address based on @size.
++ */
++static inline void resource_set_range(struct resource *res,
++ resource_size_t start,
++ resource_size_t size)
++{
++ res->start = start;
++ resource_set_size(res, size);
++}
++
+ static inline resource_size_t resource_size(const struct resource *res)
+ {
+ return res->end - res->start + 1;
--- /dev/null
+From f4fcfdda2fd8834c62dcb9bfddcf1f89d190b70e Mon Sep 17 00:00:00 2001
+Date: Wed, 23 Apr 2025 14:42:13 -0500
+Subject: [PATCH] of: reserved_mem: Add functions to parse "memory-region"
+
+Drivers with "memory-region" properties currently have to do their own
+parsing of the "memory-region" property. The result is that all the
+drivers have similar patterns: a call to parse "memory-region" followed
+by retrieving the region's address and size. As this is a standard
+property, it should have common functions for drivers to use. Add new
+functions to count the number of regions and retrieve the region's
+address as a resource.
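+
+Typical driver usage then becomes (illustrative snippet; "qdma0-buf"
+mirrors the region name used by the airoha driver):
+
+ struct resource res;
+ int err;
+
+ err = of_reserved_mem_region_to_resource_byname(dev->of_node,
+                                                 "qdma0-buf", &res);
+ if (err)
+         return err;
+ /* res.start / resource_size(&res) now describe the region */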
+
+---
+ drivers/of/of_reserved_mem.c | 80 +++++++++++++++++++++++++++++++++
+ include/linux/of_reserved_mem.h | 26 +++++++++++
+ 2 files changed, 106 insertions(+)
+
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -12,6 +12,7 @@
+ #define pr_fmt(fmt) "OF: reserved mem: " fmt
+
+ #include <linux/err.h>
++#include <linux/ioport.h>
+ #include <linux/of.h>
+ #include <linux/of_fdt.h>
+ #include <linux/of_platform.h>
+@@ -514,3 +515,82 @@ struct reserved_mem *of_reserved_mem_loo
+ return NULL;
+ }
+ EXPORT_SYMBOL_GPL(of_reserved_mem_lookup);
++
++/**
++ * of_reserved_mem_region_to_resource() - Get a reserved memory region as a resource
++ * @np: node containing 'memory-region' property
++ * @idx: index of 'memory-region' property to lookup
++ * @res: Pointer to a struct resource to fill in with reserved region
++ *
++ * This function allows drivers to lookup a node's 'memory-region' property
++ * entries by index and return a struct resource for the entry.
++ *
++ * Returns 0 on success with @res filled in. Returns -ENODEV if 'memory-region'
++ * is missing or unavailable, -EINVAL for any other error.
++ */
++int of_reserved_mem_region_to_resource(const struct device_node *np,
++ unsigned int idx, struct resource *res)
++{
++ struct reserved_mem *rmem;
++
++ if (!np)
++ return -EINVAL;
++
++ struct device_node __free(device_node) *target = of_parse_phandle(np, "memory-region", idx);
++ if (!target || !of_device_is_available(target))
++ return -ENODEV;
++
++ rmem = of_reserved_mem_lookup(target);
++ if (!rmem)
++ return -EINVAL;
++
++ resource_set_range(res, rmem->base, rmem->size);
++ res->name = rmem->name;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource);
++
++/**
++ * of_reserved_mem_region_to_resource_byname() - Get a reserved memory region as a resource
++ * @np: node containing 'memory-region' property
++ * @name: name of 'memory-region' property entry to lookup
++ * @res: Pointer to a struct resource to fill in with reserved region
++ *
++ * This function allows drivers to lookup a node's 'memory-region' property
++ * entries by name and return a struct resource for the entry.
++ *
++ * Returns 0 on success with @res filled in, or a negative error-code on
++ * failure.
++ */
++int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
++ const char *name,
++ struct resource *res)
++{
++ int idx;
++
++ if (!name)
++ return -EINVAL;
++
++ idx = of_property_match_string(np, "memory-region-names", name);
++ if (idx < 0)
++ return idx;
++
++ return of_reserved_mem_region_to_resource(np, idx, res);
++}
++EXPORT_SYMBOL_GPL(of_reserved_mem_region_to_resource_byname);
++
++/**
++ * of_reserved_mem_region_count() - Return the number of 'memory-region' entries
++ * @np: node containing 'memory-region' property
++ *
++ * This function allows drivers to retrieve the number of entries for a node's
++ * 'memory-region' property.
++ *
++ * Returns the number of entries on success, or negative error code on a
++ * malformed property.
++ */
++int of_reserved_mem_region_count(const struct device_node *np)
++{
++ return of_count_phandle_with_args(np, "memory-region", NULL);
++}
++EXPORT_SYMBOL_GPL(of_reserved_mem_region_count);
+--- a/include/linux/of_reserved_mem.h
++++ b/include/linux/of_reserved_mem.h
+@@ -7,6 +7,7 @@
+
+ struct of_phandle_args;
+ struct reserved_mem_ops;
++struct resource;
+
+ struct reserved_mem {
+ const char *name;
+@@ -40,6 +41,12 @@ int of_reserved_mem_device_init_by_name(
+ void of_reserved_mem_device_release(struct device *dev);
+
+ struct reserved_mem *of_reserved_mem_lookup(struct device_node *np);
++int of_reserved_mem_region_to_resource(const struct device_node *np,
++ unsigned int idx, struct resource *res);
++int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
++ const char *name, struct resource *res);
++int of_reserved_mem_region_count(const struct device_node *np);
++
+ #else
+
+ #define RESERVEDMEM_OF_DECLARE(name, compat, init) \
+@@ -64,6 +71,25 @@ static inline struct reserved_mem *of_re
+ {
+ return NULL;
+ }
++
++static inline int of_reserved_mem_region_to_resource(const struct device_node *np,
++ unsigned int idx,
++ struct resource *res)
++{
++ return -ENOSYS;
++}
++
++static inline int of_reserved_mem_region_to_resource_byname(const struct device_node *np,
++ const char *name,
++ struct resource *res)
++{
++ return -ENOSYS;
++}
++
++static inline int of_reserved_mem_region_count(const struct device_node *np)
++{
++ return 0;
++}
+ #endif
+
+ /**
--- /dev/null
+From 564923b02c1d2fe02ee789f9849ff79979b63b9f Mon Sep 17 00:00:00 2001
+Date: Mon, 11 Aug 2025 17:31:37 +0200
+Subject: [PATCH 1/6] net: airoha: npu: Add NPU wlan memory initialization
+ commands
+
+Introduce wlan_init_reserved_memory callback used by MT76 driver during
+NPU wlan offloading setup.
+This is a preliminary patch to enable wlan flowtable offload for EN7581
+SoC with MT76 driver.
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 82 ++++++++++++++++++++++++
+ drivers/net/ethernet/airoha/airoha_npu.h | 38 +++++++++++
+ 2 files changed, 120 insertions(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -124,6 +124,13 @@ struct ppe_mbox_data {
+ };
+ };
+
++struct wlan_mbox_data {
++ u32 ifindex:4;
++ u32 func_type:4;
++ u32 func_id;
++ DECLARE_FLEX_ARRAY(u8, d);
++};
++
+ static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id,
+ void *p, int size)
+ {
+@@ -390,6 +397,80 @@ out:
+ return err;
+ }
+
++static int airoha_npu_wlan_msg_send(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_set_cmd func_id,
++ void *data, int data_len, gfp_t gfp)
++{
++ struct wlan_mbox_data *wlan_data;
++ int err, len;
++
++ len = sizeof(*wlan_data) + data_len;
++ wlan_data = kzalloc(len, gfp);
++ if (!wlan_data)
++ return -ENOMEM;
++
++ wlan_data->ifindex = ifindex;
++ wlan_data->func_type = NPU_OP_SET;
++ wlan_data->func_id = func_id;
++ memcpy(wlan_data->d, data, data_len);
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
++ kfree(wlan_data);
++
++ return err;
++}
++
++static int
++airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
++ int ifindex, const char *name,
++ enum airoha_npu_wlan_set_cmd func_id)
++{
++ struct device *dev = npu->dev;
++ struct resource res;
++ int err;
++ u32 val;
++
++ err = of_reserved_mem_region_to_resource_byname(dev->of_node, name,
++ &res);
++ if (err)
++ return err;
++
++ val = res.start;
++ return airoha_npu_wlan_msg_send(npu, ifindex, func_id, &val,
++ sizeof(val), GFP_KERNEL);
++}
++
++static int airoha_npu_wlan_init_memory(struct airoha_npu *npu)
++{
++ enum airoha_npu_wlan_set_cmd cmd = WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU;
++ u32 val = 0;
++ int err;
++
++ err = airoha_npu_wlan_msg_send(npu, 1, cmd, &val, sizeof(val),
++ GFP_KERNEL);
++ if (err)
++ return err;
++
++ cmd = WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR;
++ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-bufid", cmd);
++ if (err)
++ return err;
++
++ cmd = WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR;
++ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "pkt", cmd);
++ if (err)
++ return err;
++
++ cmd = WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR;
++ err = airoha_npu_wlan_set_reserved_memory(npu, 0, "tx-pkt", cmd);
++ if (err)
++ return err;
++
++ cmd = WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU;
++ return airoha_npu_wlan_msg_send(npu, 0, cmd, &val, sizeof(val),
++ GFP_KERNEL);
++}
++
+ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+ {
+ struct platform_device *pdev;
+@@ -493,6 +574,7 @@ static int airoha_npu_probe(struct platf
+ npu->ops.ppe_deinit = airoha_npu_ppe_deinit;
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
++ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+--- a/drivers/net/ethernet/airoha/airoha_npu.h
++++ b/drivers/net/ethernet/airoha/airoha_npu.h
+@@ -6,6 +6,43 @@
+
+ #define NPU_NUM_CORES 8
+
++enum airoha_npu_wlan_set_cmd {
++ WLAN_FUNC_SET_WAIT_PCIE_ADDR,
++ WLAN_FUNC_SET_WAIT_DESC,
++ WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
++ WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
++ WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
++ WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
++ WLAN_FUNC_SET_WAIT_DEL_STA,
++ WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
++ WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
++ WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
++ WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
++ WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
++ WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
++ WLAN_FUNC_SET_WAIT_PCIE_STATE,
++ WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
++ WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
++ WLAN_FUNC_SET_WAIT_BAR_INFO,
++ WLAN_FUNC_SET_WAIT_FAST_FLAG,
++ WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
++ WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
++ WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
++ WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
++ WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
++ WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
++ WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
++ WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
++ WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
++ WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
++ WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
++ WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
++ WLAN_FUNC_SET_WAIT_HWNAT_INIT,
++ WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
++ WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
++ WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
++};
++
+ struct airoha_npu {
+ struct device *dev;
+ struct regmap *regmap;
+@@ -29,6 +66,7 @@ struct airoha_npu {
+ dma_addr_t foe_addr,
+ u32 entry_size, u32 hash,
+ bool ppe2);
++ int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
+ } ops;
+ };
+
--- /dev/null
+From f97fc66185b2004ad5f393f78b3e645009ddd1d0 Mon Sep 17 00:00:00 2001
+Date: Mon, 11 Aug 2025 17:31:38 +0200
+Subject: [PATCH 2/6] net: airoha: npu: Add wlan_{send,get}_msg NPU callbacks
+
+Introduce the wlan_send_msg() and wlan_get_msg() NPU wlan callbacks,
+used by the wlan driver (MT76) to initialize NPU module registers in
+order to offload wireless-wired traffic.
+This is a preliminary patch to enable wlan flowtable offload for EN7581
+SoC with MT76 driver.
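+
+An illustrative consumer-side query (the ifindex and command are chosen
+as an example):
+
+ u32 version;
+ int err;
+
+ err = npu->ops.wlan_get_msg(npu, 0, WLAN_FUNC_GET_WAIT_NPU_VERSION,
+                             &version, sizeof(version), GFP_KERNEL);
+ if (err)
+         return err;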
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 52 ++++++++++++++++++++++++
+ drivers/net/ethernet/airoha/airoha_npu.h | 22 ++++++++++
+ 2 files changed, 74 insertions(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -42,6 +42,22 @@
+ #define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2))
+ #define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2))
+
++#define NPU_WLAN_BASE_ADDR 0x30d000
++
++#define REG_IRQ_STATUS (NPU_WLAN_BASE_ADDR + 0x030)
++#define REG_IRQ_RXDONE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 2) + 0x034)
++#define NPU_IRQ_RX_MASK(_n) ((_n) == 1 ? BIT(17) : BIT(16))
++
++#define REG_TX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x080)
++#define REG_TX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x084)
++#define REG_TX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x088)
++#define REG_TX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x08c)
++
++#define REG_RX_BASE(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x180)
++#define REG_RX_DSCP_NUM(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x184)
++#define REG_RX_CPU_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x188)
++#define REG_RX_DMA_IDX(_n) (NPU_WLAN_BASE_ADDR + ((_n) << 4) + 0x18c)
++
+ #define NPU_TIMER_BASE_ADDR 0x310100
+ #define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100))
+ #define WDT_EN_MASK BIT(25)
+@@ -420,6 +436,30 @@ static int airoha_npu_wlan_msg_send(stru
+ return err;
+ }
+
++static int airoha_npu_wlan_msg_get(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_get_cmd func_id,
++ void *data, int data_len, gfp_t gfp)
++{
++ struct wlan_mbox_data *wlan_data;
++ int err, len;
++
++ len = sizeof(*wlan_data) + data_len;
++ wlan_data = kzalloc(len, gfp);
++ if (!wlan_data)
++ return -ENOMEM;
++
++ wlan_data->ifindex = ifindex;
++ wlan_data->func_type = NPU_OP_GET;
++ wlan_data->func_id = func_id;
++
++ err = airoha_npu_send_msg(npu, NPU_FUNC_WIFI, wlan_data, len);
++ if (!err)
++ memcpy(data, wlan_data->d, data_len);
++ kfree(wlan_data);
++
++ return err;
++}
++
+ static int
+ airoha_npu_wlan_set_reserved_memory(struct airoha_npu *npu,
+ int ifindex, const char *name,
+@@ -471,6 +511,15 @@ static int airoha_npu_wlan_init_memory(s
+ GFP_KERNEL);
+ }
+
++static u32 airoha_npu_wlan_queue_addr_get(struct airoha_npu *npu, int qid,
++ bool xmit)
++{
++ if (xmit)
++ return REG_TX_BASE(qid + 2);
++
++ return REG_RX_BASE(qid);
++}
++
+ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+ {
+ struct platform_device *pdev;
+@@ -575,6 +624,9 @@ static int airoha_npu_probe(struct platf
+ npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries;
+ npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry;
+ npu->ops.wlan_init_reserved_memory = airoha_npu_wlan_init_memory;
++ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
++ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
++ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+--- a/drivers/net/ethernet/airoha/airoha_npu.h
++++ b/drivers/net/ethernet/airoha/airoha_npu.h
+@@ -43,6 +43,20 @@ enum airoha_npu_wlan_set_cmd {
+ WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
+ };
+
++enum airoha_npu_wlan_get_cmd {
++ WLAN_FUNC_GET_WAIT_NPU_INFO,
++ WLAN_FUNC_GET_WAIT_LAST_RATE,
++ WLAN_FUNC_GET_WAIT_COUNTER,
++ WLAN_FUNC_GET_WAIT_DBG_COUNTER,
++ WLAN_FUNC_GET_WAIT_RXDESC_BASE,
++ WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
++ WLAN_FUNC_GET_WAIT_DMA_ADDR,
++ WLAN_FUNC_GET_WAIT_RING_SIZE,
++ WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
++ WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
++ WLAN_FUNC_GET_WAIT_NPU_VERSION,
++};
++
+ struct airoha_npu {
+ struct device *dev;
+ struct regmap *regmap;
+@@ -67,6 +81,14 @@ struct airoha_npu {
+ u32 entry_size, u32 hash,
+ bool ppe2);
+ int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
++ int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_set_cmd func_id,
++ void *data, int data_len, gfp_t gfp);
++ int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_get_cmd func_id,
++ void *data, int data_len, gfp_t gfp);
++ u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
++ bool xmit);
+ } ops;
+ };
+
--- /dev/null
+From 03b7ca3ee5e1b700c462aed5b6cb88f616d6ba7f Mon Sep 17 00:00:00 2001
+Date: Mon, 11 Aug 2025 17:31:39 +0200
+Subject: [PATCH 3/6] net: airoha: npu: Add wlan irq management callbacks
+
+Introduce callbacks used by the MT76 driver to configure NPU SoC
+interrupts. This is a preliminary patch to enable wlan flowtable
+offload for EN7581 SoC with MT76 driver.
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 27 ++++++++++++++++++++++++
+ drivers/net/ethernet/airoha/airoha_npu.h | 4 ++++
+ 2 files changed, 31 insertions(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -520,6 +520,29 @@ static u32 airoha_npu_wlan_queue_addr_ge
+ return REG_RX_BASE(qid);
+ }
+
++static void airoha_npu_wlan_irq_status_set(struct airoha_npu *npu, u32 val)
++{
++ regmap_write(npu->regmap, REG_IRQ_STATUS, val);
++}
++
++static u32 airoha_npu_wlan_irq_status_get(struct airoha_npu *npu, int q)
++{
++ u32 val;
++
++ regmap_read(npu->regmap, REG_IRQ_STATUS, &val);
++ return val;
++}
++
++static void airoha_npu_wlan_irq_enable(struct airoha_npu *npu, int q)
++{
++ regmap_set_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
++}
++
++static void airoha_npu_wlan_irq_disable(struct airoha_npu *npu, int q)
++{
++ regmap_clear_bits(npu->regmap, REG_IRQ_RXDONE(q), NPU_IRQ_RX_MASK(q));
++}
++
+ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
+ {
+ struct platform_device *pdev;
+@@ -627,6 +650,10 @@ static int airoha_npu_probe(struct platf
+ npu->ops.wlan_send_msg = airoha_npu_wlan_msg_send;
+ npu->ops.wlan_get_msg = airoha_npu_wlan_msg_get;
+ npu->ops.wlan_get_queue_addr = airoha_npu_wlan_queue_addr_get;
++ npu->ops.wlan_set_irq_status = airoha_npu_wlan_irq_status_set;
++ npu->ops.wlan_get_irq_status = airoha_npu_wlan_irq_status_get;
++ npu->ops.wlan_enable_irq = airoha_npu_wlan_irq_enable;
++ npu->ops.wlan_disable_irq = airoha_npu_wlan_irq_disable;
+
+ npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(npu->regmap))
+--- a/drivers/net/ethernet/airoha/airoha_npu.h
++++ b/drivers/net/ethernet/airoha/airoha_npu.h
+@@ -89,6 +89,10 @@ struct airoha_npu {
+ void *data, int data_len, gfp_t gfp);
+ u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
+ bool xmit);
++ void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
++ u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
++ void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
++ void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
+ } ops;
+ };
+
--- /dev/null
+From a1740b16c83729d908c760eaa821f27b51e58a13 Mon Sep 17 00:00:00 2001
+Date: Mon, 11 Aug 2025 17:31:40 +0200
+Subject: [PATCH 4/6] net: airoha: npu: Read NPU wlan interrupt lines from the
+ DTS
+
+Read all NPU wlan IRQ lines from the NPU device-tree node.
+The NPU module fires the wlan irq lines when traffic to/from the WiFi
+NIC is not hw accelerated (these interrupts will be consumed by the
+MT76 driver in subsequent patches).
+This is a preliminary patch to enable wlan flowtable offload for EN7581
+SoC.
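+
+Assuming the existing platform IRQ layout (mailbox IRQ at index 0, the
+eight core watchdog IRQs at indices 1-8), the six wlan lines sit at
+platform IRQ indices 9-14:
+
+ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);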
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 9 +++++++++
+ drivers/net/ethernet/airoha/airoha_npu.h | 3 +++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -696,6 +696,15 @@ static int airoha_npu_probe(struct platf
+ INIT_WORK(&core->wdt_work, airoha_npu_wdt_work);
+ }
+
++ /* wlan IRQ lines */
++ for (i = 0; i < ARRAY_SIZE(npu->irqs); i++) {
++ irq = platform_get_irq(pdev, i + ARRAY_SIZE(npu->cores) + 1);
++ if (irq < 0)
++ return irq;
++
++ npu->irqs[i] = irq;
++ }
++
+ err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+--- a/drivers/net/ethernet/airoha/airoha_npu.h
++++ b/drivers/net/ethernet/airoha/airoha_npu.h
+@@ -5,6 +5,7 @@
+ */
+
+ #define NPU_NUM_CORES 8
++#define NPU_NUM_IRQ 6
+
+ enum airoha_npu_wlan_set_cmd {
+ WLAN_FUNC_SET_WAIT_PCIE_ADDR,
+@@ -68,6 +69,8 @@ struct airoha_npu {
+ struct work_struct wdt_work;
+ } cores[NPU_NUM_CORES];
+
++ int irqs[NPU_NUM_IRQ];
++
+ struct airoha_foe_stats __iomem *stats;
+
+ struct {
--- /dev/null
+From 29c4a3ce508961a02d185ead2d52699b16d82c6d Mon Sep 17 00:00:00 2001
+Date: Mon, 11 Aug 2025 17:31:41 +0200
+Subject: [PATCH 5/6] net: airoha: npu: Enable core 3 for WiFi offloading
+
+NPU core 3 is responsible for WiFi offloading, so enable it during NPU
+probe.
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -726,8 +726,7 @@ static int airoha_npu_probe(struct platf
+ usleep_range(1000, 2000);
+
+ /* enable NPU cores */
+- /* do not start core3 since it is used for WiFi offloading */
+- regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7);
++ regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xff);
+ regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1);
+ msleep(100);
+
--- /dev/null
+From b3ef7bdec66fb1813e865fd39d179a93cefd2015 Mon Sep 17 00:00:00 2001
+Date: Mon, 11 Aug 2025 17:31:42 +0200
+Subject: [PATCH 6/6] net: airoha: Add airoha_offload.h header
+
+Move NPU definitions to airoha_offload.h in include/linux/soc/airoha/ in
+order to allow the MT76 driver to access the callback definitions.
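+
+Consumers can then call the inline wrappers unconditionally; when
+CONFIG_NET_AIROHA_NPU is disabled they compile to the stubs at the
+bottom of the header, e.g.:
+
+ /* returns -EOPNOTSUPP if the NPU driver is not built */
+ err = airoha_npu_wlan_init_reserved_memory(npu);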
+
+---
+ drivers/net/ethernet/airoha/airoha_npu.c | 2 +-
+ drivers/net/ethernet/airoha/airoha_npu.h | 103 ---------
+ drivers/net/ethernet/airoha/airoha_ppe.c | 2 +-
+ include/linux/soc/airoha/airoha_offload.h | 260 ++++++++++++++++++++++
+ 4 files changed, 262 insertions(+), 105 deletions(-)
+ delete mode 100644 drivers/net/ethernet/airoha/airoha_npu.h
+ create mode 100644 include/linux/soc/airoha/airoha_offload.h
+
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -11,9 +11,9 @@
+ #include <linux/of_platform.h>
+ #include <linux/of_reserved_mem.h>
+ #include <linux/regmap.h>
++#include <linux/soc/airoha/airoha_offload.h>
+
+ #include "airoha_eth.h"
+-#include "airoha_npu.h"
+
+ #define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin"
+ #define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin"
+--- a/drivers/net/ethernet/airoha/airoha_npu.h
++++ /dev/null
+@@ -1,103 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * Copyright (c) 2025 AIROHA Inc
+- */
+-
+-#define NPU_NUM_CORES 8
+-#define NPU_NUM_IRQ 6
+-
+-enum airoha_npu_wlan_set_cmd {
+- WLAN_FUNC_SET_WAIT_PCIE_ADDR,
+- WLAN_FUNC_SET_WAIT_DESC,
+- WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
+- WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
+- WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
+- WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
+- WLAN_FUNC_SET_WAIT_DEL_STA,
+- WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
+- WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
+- WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
+- WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
+- WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
+- WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
+- WLAN_FUNC_SET_WAIT_PCIE_STATE,
+- WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
+- WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
+- WLAN_FUNC_SET_WAIT_BAR_INFO,
+- WLAN_FUNC_SET_WAIT_FAST_FLAG,
+- WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
+- WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
+- WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
+- WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
+- WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
+- WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
+- WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
+- WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
+- WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
+- WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
+- WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
+- WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
+- WLAN_FUNC_SET_WAIT_HWNAT_INIT,
+- WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
+- WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
+- WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
+-};
+-
+-enum airoha_npu_wlan_get_cmd {
+- WLAN_FUNC_GET_WAIT_NPU_INFO,
+- WLAN_FUNC_GET_WAIT_LAST_RATE,
+- WLAN_FUNC_GET_WAIT_COUNTER,
+- WLAN_FUNC_GET_WAIT_DBG_COUNTER,
+- WLAN_FUNC_GET_WAIT_RXDESC_BASE,
+- WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
+- WLAN_FUNC_GET_WAIT_DMA_ADDR,
+- WLAN_FUNC_GET_WAIT_RING_SIZE,
+- WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
+- WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
+- WLAN_FUNC_GET_WAIT_NPU_VERSION,
+-};
+-
+-struct airoha_npu {
+- struct device *dev;
+- struct regmap *regmap;
+-
+- struct airoha_npu_core {
+- struct airoha_npu *npu;
+- /* protect concurrent npu memory accesses */
+- spinlock_t lock;
+- struct work_struct wdt_work;
+- } cores[NPU_NUM_CORES];
+-
+- int irqs[NPU_NUM_IRQ];
+-
+- struct airoha_foe_stats __iomem *stats;
+-
+- struct {
+- int (*ppe_init)(struct airoha_npu *npu);
+- int (*ppe_deinit)(struct airoha_npu *npu);
+- int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
+- dma_addr_t foe_addr,
+- int sram_num_entries);
+- int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
+- dma_addr_t foe_addr,
+- u32 entry_size, u32 hash,
+- bool ppe2);
+- int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
+- int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
+- enum airoha_npu_wlan_set_cmd func_id,
+- void *data, int data_len, gfp_t gfp);
+- int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
+- enum airoha_npu_wlan_get_cmd func_id,
+- void *data, int data_len, gfp_t gfp);
+- u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
+- bool xmit);
+- void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
+- u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
+- void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
+- void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
+- } ops;
+-};
+-
+-struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
+-void airoha_npu_put(struct airoha_npu *npu);
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -7,10 +7,10 @@
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
+ #include <linux/rhashtable.h>
++#include <linux/soc/airoha/airoha_offload.h>
+ #include <net/ipv6.h>
+ #include <net/pkt_cls.h>
+
+-#include "airoha_npu.h"
+ #include "airoha_regs.h"
+ #include "airoha_eth.h"
+
+--- /dev/null
++++ b/include/linux/soc/airoha/airoha_offload.h
+@@ -0,0 +1,260 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * Copyright (c) 2025 AIROHA Inc
++ */
++#ifndef AIROHA_OFFLOAD_H
++#define AIROHA_OFFLOAD_H
++
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++
++#define NPU_NUM_CORES 8
++#define NPU_NUM_IRQ 6
++#define NPU_RX0_DESC_NUM 512
++#define NPU_RX1_DESC_NUM 512
++
++/* CTRL */
++#define NPU_RX_DMA_DESC_LAST_MASK BIT(29)
++#define NPU_RX_DMA_DESC_LEN_MASK GENMASK(28, 15)
++#define NPU_RX_DMA_DESC_CUR_LEN_MASK GENMASK(14, 1)
++#define NPU_RX_DMA_DESC_DONE_MASK BIT(0)
++/* INFO */
++#define NPU_RX_DMA_PKT_COUNT_MASK GENMASK(31, 28)
++#define NPU_RX_DMA_PKT_ID_MASK GENMASK(28, 26)
++#define NPU_RX_DMA_SRC_PORT_MASK GENMASK(25, 21)
++#define NPU_RX_DMA_CRSN_MASK GENMASK(20, 16)
++#define NPU_RX_DMA_FOE_ID_MASK GENMASK(15, 0)
++/* DATA */
++#define NPU_RX_DMA_SID_MASK GENMASK(31, 16)
++#define NPU_RX_DMA_FRAG_TYPE_MASK GENMASK(15, 14)
++#define NPU_RX_DMA_PRIORITY_MASK GENMASK(13, 10)
++#define NPU_RX_DMA_RADIO_ID_MASK GENMASK(9, 6)
++#define NPU_RX_DMA_VAP_ID_MASK GENMASK(5, 2)
++#define NPU_RX_DMA_FRAME_TYPE_MASK GENMASK(1, 0)
++
++struct airoha_npu_rx_dma_desc {
++ u32 ctrl;
++ u32 info;
++ u32 data;
++ u32 addr;
++ u64 rsv;
++} __packed;
++
++/* CTRL */
++#define NPU_TX_DMA_DESC_SCHED_MASK BIT(31)
++#define NPU_TX_DMA_DESC_LEN_MASK GENMASK(30, 18)
++#define NPU_TX_DMA_DESC_VEND_LEN_MASK GENMASK(17, 1)
++#define NPU_TX_DMA_DESC_DONE_MASK BIT(0)
++
++#define NPU_TXWI_LEN 192
++
++struct airoha_npu_tx_dma_desc {
++ u32 ctrl;
++ u32 addr;
++ u64 rsv;
++ u8 txwi[NPU_TXWI_LEN];
++} __packed;
++
++enum airoha_npu_wlan_set_cmd {
++ WLAN_FUNC_SET_WAIT_PCIE_ADDR,
++ WLAN_FUNC_SET_WAIT_DESC,
++ WLAN_FUNC_SET_WAIT_NPU_INIT_DONE,
++ WLAN_FUNC_SET_WAIT_TRAN_TO_CPU,
++ WLAN_FUNC_SET_WAIT_BA_WIN_SIZE,
++ WLAN_FUNC_SET_WAIT_DRIVER_MODEL,
++ WLAN_FUNC_SET_WAIT_DEL_STA,
++ WLAN_FUNC_SET_WAIT_DRAM_BA_NODE_ADDR,
++ WLAN_FUNC_SET_WAIT_PKT_BUF_ADDR,
++ WLAN_FUNC_SET_WAIT_IS_TEST_NOBA,
++ WLAN_FUNC_SET_WAIT_FLUSHONE_TIMEOUT,
++ WLAN_FUNC_SET_WAIT_FLUSHALL_TIMEOUT,
++ WLAN_FUNC_SET_WAIT_IS_FORCE_TO_CPU,
++ WLAN_FUNC_SET_WAIT_PCIE_STATE,
++ WLAN_FUNC_SET_WAIT_PCIE_PORT_TYPE,
++ WLAN_FUNC_SET_WAIT_ERROR_RETRY_TIMES,
++ WLAN_FUNC_SET_WAIT_BAR_INFO,
++ WLAN_FUNC_SET_WAIT_FAST_FLAG,
++ WLAN_FUNC_SET_WAIT_NPU_BAND0_ONCPU,
++ WLAN_FUNC_SET_WAIT_TX_RING_PCIE_ADDR,
++ WLAN_FUNC_SET_WAIT_TX_DESC_HW_BASE,
++ WLAN_FUNC_SET_WAIT_TX_BUF_SPACE_HW_BASE,
++ WLAN_FUNC_SET_WAIT_RX_RING_FOR_TXDONE_HW_BASE,
++ WLAN_FUNC_SET_WAIT_TX_PKT_BUF_ADDR,
++ WLAN_FUNC_SET_WAIT_INODE_TXRX_REG_ADDR,
++ WLAN_FUNC_SET_WAIT_INODE_DEBUG_FLAG,
++ WLAN_FUNC_SET_WAIT_INODE_HW_CFG_INFO,
++ WLAN_FUNC_SET_WAIT_INODE_STOP_ACTION,
++ WLAN_FUNC_SET_WAIT_INODE_PCIE_SWAP,
++ WLAN_FUNC_SET_WAIT_RATELIMIT_CTRL,
++ WLAN_FUNC_SET_WAIT_HWNAT_INIT,
++ WLAN_FUNC_SET_WAIT_ARHT_CHIP_INFO,
++ WLAN_FUNC_SET_WAIT_TX_BUF_CHECK_ADDR,
++ WLAN_FUNC_SET_WAIT_TOKEN_ID_SIZE,
++};
++
++enum airoha_npu_wlan_get_cmd {
++ WLAN_FUNC_GET_WAIT_NPU_INFO,
++ WLAN_FUNC_GET_WAIT_LAST_RATE,
++ WLAN_FUNC_GET_WAIT_COUNTER,
++ WLAN_FUNC_GET_WAIT_DBG_COUNTER,
++ WLAN_FUNC_GET_WAIT_RXDESC_BASE,
++ WLAN_FUNC_GET_WAIT_WCID_DBG_COUNTER,
++ WLAN_FUNC_GET_WAIT_DMA_ADDR,
++ WLAN_FUNC_GET_WAIT_RING_SIZE,
++ WLAN_FUNC_GET_WAIT_NPU_SUPPORT_MAP,
++ WLAN_FUNC_GET_WAIT_MDC_LOCK_ADDRESS,
++ WLAN_FUNC_GET_WAIT_NPU_VERSION,
++};
++
++struct airoha_npu {
++#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
++ struct device *dev;
++ struct regmap *regmap;
++
++ struct airoha_npu_core {
++ struct airoha_npu *npu;
++ /* protect concurrent npu memory accesses */
++ spinlock_t lock;
++ struct work_struct wdt_work;
++ } cores[NPU_NUM_CORES];
++
++ int irqs[NPU_NUM_IRQ];
++
++ struct airoha_foe_stats __iomem *stats;
++
++ struct {
++ int (*ppe_init)(struct airoha_npu *npu);
++ int (*ppe_deinit)(struct airoha_npu *npu);
++ int (*ppe_flush_sram_entries)(struct airoha_npu *npu,
++ dma_addr_t foe_addr,
++ int sram_num_entries);
++ int (*ppe_foe_commit_entry)(struct airoha_npu *npu,
++ dma_addr_t foe_addr,
++ u32 entry_size, u32 hash,
++ bool ppe2);
++ int (*wlan_init_reserved_memory)(struct airoha_npu *npu);
++ int (*wlan_send_msg)(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_set_cmd func_id,
++ void *data, int data_len, gfp_t gfp);
++ int (*wlan_get_msg)(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_get_cmd func_id,
++ void *data, int data_len, gfp_t gfp);
++ u32 (*wlan_get_queue_addr)(struct airoha_npu *npu, int qid,
++ bool xmit);
++ void (*wlan_set_irq_status)(struct airoha_npu *npu, u32 val);
++ u32 (*wlan_get_irq_status)(struct airoha_npu *npu, int q);
++ void (*wlan_enable_irq)(struct airoha_npu *npu, int q);
++ void (*wlan_disable_irq)(struct airoha_npu *npu, int q);
++ } ops;
++#endif
++};
++
++#if (IS_BUILTIN(CONFIG_NET_AIROHA_NPU) || IS_MODULE(CONFIG_NET_AIROHA_NPU))
++struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr);
++void airoha_npu_put(struct airoha_npu *npu);
++
++static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
++{
++ return npu->ops.wlan_init_reserved_memory(npu);
++}
++
++static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
++ int ifindex,
++ enum airoha_npu_wlan_set_cmd cmd,
++ void *data, int data_len, gfp_t gfp)
++{
++ return npu->ops.wlan_send_msg(npu, ifindex, cmd, data, data_len, gfp);
++}
++
++static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_get_cmd cmd,
++ void *data, int data_len, gfp_t gfp)
++{
++ return npu->ops.wlan_get_msg(npu, ifindex, cmd, data, data_len, gfp);
++}
++
++static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
++ int qid, bool xmit)
++{
++ return npu->ops.wlan_get_queue_addr(npu, qid, xmit);
++}
++
++static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
++ u32 val)
++{
++ npu->ops.wlan_set_irq_status(npu, val);
++}
++
++static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu, int q)
++{
++ return npu->ops.wlan_get_irq_status(npu, q);
++}
++
++static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
++{
++ npu->ops.wlan_enable_irq(npu, q);
++}
++
++static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
++{
++ npu->ops.wlan_disable_irq(npu, q);
++}
++#else
++static inline struct airoha_npu *airoha_npu_get(struct device *dev,
++ dma_addr_t *foe_stats_addr)
++{
++ return NULL;
++}
++
++static inline void airoha_npu_put(struct airoha_npu *npu)
++{
++}
++
++static inline int airoha_npu_wlan_init_reserved_memory(struct airoha_npu *npu)
++{
++ return -EOPNOTSUPP;
++}
++
++static inline int airoha_npu_wlan_send_msg(struct airoha_npu *npu,
++ int ifindex,
++ enum airoha_npu_wlan_set_cmd cmd,
++ void *data, int data_len, gfp_t gfp)
++{
++ return -EOPNOTSUPP;
++}
++
++static inline int airoha_npu_wlan_get_msg(struct airoha_npu *npu, int ifindex,
++ enum airoha_npu_wlan_get_cmd cmd,
++ void *data, int data_len, gfp_t gfp)
++{
++ return -EOPNOTSUPP;
++}
++
++static inline u32 airoha_npu_wlan_get_queue_addr(struct airoha_npu *npu,
++ int qid, bool xmit)
++{
++ return 0;
++}
++
++static inline void airoha_npu_wlan_set_irq_status(struct airoha_npu *npu,
++ u32 val)
++{
++}
++
++static inline u32 airoha_npu_wlan_get_irq_status(struct airoha_npu *npu,
++ int q)
++{
++ return 0;
++}
++
++static inline void airoha_npu_wlan_enable_irq(struct airoha_npu *npu, int q)
++{
++}
++
++static inline void airoha_npu_wlan_disable_irq(struct airoha_npu *npu, int q)
++{
++}
++#endif
++
++#endif /* AIROHA_OFFLOAD_H */
--- /dev/null
+From a8bdd935d1ddb7186358fb60ffe84253e85340c8 Mon Sep 17 00:00:00 2001
+Date: Thu, 14 Aug 2025 09:51:16 +0200
+Subject: [PATCH] net: airoha: Add wlan flowtable TX offload
+
+Introduce support for offloading traffic received on the ethernet NIC
+and forwarded to the wireless one using the HW Packet Processor Engine
+(PPE) capabilities.
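+
+The wdma target is discovered via the standard forward-path walk
+(condensed from the diff below):
+
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+
+ if (dev_fill_forward_path(dev, data->eth.h_dest, &stack))
+         return -1;
+ path = &stack.path[stack.num_paths - 1];
+ /* a DEV_PATH_MTK_WDMA last hop carries wdma_idx/bss/wcid */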
+
+Link: https://patch.msgid.link/20250814-airoha-en7581-wlan-tx-offload-v1-1-72e0a312003e@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.h | 11 +++
+ drivers/net/ethernet/airoha/airoha_ppe.c | 103 ++++++++++++++++-------
+ 2 files changed, 85 insertions(+), 29 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -252,6 +252,10 @@ enum {
+ #define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16)
+ #define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0)
+
++#define AIROHA_FOE_MAC_WDMA_QOS GENMASK(15, 12)
++#define AIROHA_FOE_MAC_WDMA_BAND BIT(11)
++#define AIROHA_FOE_MAC_WDMA_WCID GENMASK(10, 0)
++
+ struct airoha_foe_mac_info_common {
+ u16 vlan1;
+ u16 etype;
+@@ -481,6 +485,13 @@ struct airoha_flow_table_entry {
+ unsigned long cookie;
+ };
+
++struct airoha_wdma_info {
++ u8 idx;
++ u8 queue;
++ u16 wcid;
++ u8 bss;
++};
++
+ /* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
+ #define RX_IRQ0_BANK_PIN_MASK 0x839f
+ #define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -190,6 +190,31 @@ static int airoha_ppe_flow_mangle_ipv4(c
+ return 0;
+ }
+
++static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
++ struct airoha_wdma_info *info)
++{
++ struct net_device_path_stack stack;
++ struct net_device_path *path;
++ int err;
++
++ if (!dev)
++ return -ENODEV;
++
++ err = dev_fill_forward_path(dev, addr, &stack);
++ if (err)
++ return err;
++
++ path = &stack.path[stack.num_paths - 1];
++ if (path->type != DEV_PATH_MTK_WDMA)
++ return -1;
++
++ info->idx = path->mtk_wdma.wdma_idx;
++ info->bss = path->mtk_wdma.bss;
++ info->wcid = path->mtk_wdma.wcid;
++
++ return 0;
++}
++
+ static int airoha_get_dsa_port(struct net_device **dev)
+ {
+ #if IS_ENABLED(CONFIG_NET_DSA)
+@@ -220,9 +245,9 @@ static int airoha_ppe_foe_entry_prepare(
+ struct airoha_flow_data *data,
+ int l4proto)
+ {
+- int dsa_port = airoha_get_dsa_port(&dev);
++ u32 qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f), ports_pad, val;
++ int wlan_etype = -EINVAL, dsa_port = airoha_get_dsa_port(&dev);
+ struct airoha_foe_mac_info_common *l2;
+- u32 qdata, ports_pad, val;
+ u8 smac_id = 0xf;
+
+ memset(hwe, 0, sizeof(*hwe));
+@@ -236,31 +261,47 @@ static int airoha_ppe_foe_entry_prepare(
+ AIROHA_FOE_IB1_BIND_TTL;
+ hwe->ib1 = val;
+
+- val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) |
+- AIROHA_FOE_IB2_PSE_QOS;
+- if (dsa_port >= 0)
+- val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port);
+-
++ val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f);
+ if (dev) {
+- struct airoha_gdm_port *port = netdev_priv(dev);
+- u8 pse_port;
+-
+- if (!airoha_is_valid_gdm_port(eth, port))
+- return -EINVAL;
++ struct airoha_wdma_info info = {};
+
+- if (dsa_port >= 0)
+- pse_port = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+- else
+- pse_port = 2; /* uplink relies on GDM2 loopback */
+- val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port);
+-
+- /* For downlink traffic consume SRAM memory for hw forwarding
+- * descriptors queue.
+- */
+- if (airhoa_is_lan_gdm_port(port))
+- val |= AIROHA_FOE_IB2_FAST_PATH;
++ if (!airoha_ppe_get_wdma_info(dev, data->eth.h_dest, &info)) {
++ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, info.idx) |
++ FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT,
++ FE_PSE_PORT_CDM4);
++ qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, info.bss);
++ wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND,
++ info.idx) |
++ FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID,
++ info.wcid);
++ } else {
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ u8 pse_port;
++
++ if (!airoha_is_valid_gdm_port(eth, port))
++ return -EINVAL;
++
++ if (dsa_port >= 0)
++ pse_port = port->id == 4 ? FE_PSE_PORT_GDM4
++ : port->id;
++ else
++ pse_port = 2; /* uplink relies on GDM2
++ * loopback
++ */
++
++ val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port) |
++ AIROHA_FOE_IB2_PSE_QOS;
++ /* For downlink traffic consume SRAM memory for hw
++ * forwarding descriptors queue.
++ */
++ if (airhoa_is_lan_gdm_port(port))
++ val |= AIROHA_FOE_IB2_FAST_PATH;
++ if (dsa_port >= 0)
++ val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ,
++ dsa_port);
+
+- smac_id = port->id;
++ smac_id = port->id;
++ }
+ }
+
+ if (is_multicast_ether_addr(data->eth.h_dest))
+@@ -272,7 +313,6 @@ static int airoha_ppe_foe_entry_prepare(
+ if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ hwe->ipv6.ports = ports_pad;
+
+- qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f);
+ if (type == PPE_PKT_TYPE_BRIDGE) {
+ airoha_ppe_foe_set_bridge_addrs(&hwe->bridge, &data->eth);
+ hwe->bridge.data = qdata;
+@@ -313,7 +353,9 @@ static int airoha_ppe_foe_entry_prepare(
+ l2->vlan2 = data->vlan.hdr[1].id;
+ }
+
+- if (dsa_port >= 0) {
++ if (wlan_etype >= 0) {
++ l2->etype = wlan_etype;
++ } else if (dsa_port >= 0) {
+ l2->etype = BIT(dsa_port);
+ l2->etype |= !data->vlan.num ? BIT(15) : 0;
+ } else if (data->pppoe.num) {
+@@ -490,6 +532,10 @@ static void airoha_ppe_foe_flow_stats_up
+ meter = &hwe->ipv4.l2.meter;
+ }
+
++ pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
++ if (pse_port == FE_PSE_PORT_CDM4)
++ return;
++
+ airoha_ppe_foe_flow_stat_entry_reset(ppe, npu, index);
+
+ val = FIELD_GET(AIROHA_FOE_CHANNEL | AIROHA_FOE_QID, *data);
+@@ -500,7 +546,6 @@ static void airoha_ppe_foe_flow_stats_up
+ AIROHA_FOE_IB2_PSE_QOS | AIROHA_FOE_IB2_FAST_PATH);
+ *meter |= FIELD_PREP(AIROHA_FOE_TUNNEL_MTU, val);
+
+- pse_port = FIELD_GET(AIROHA_FOE_IB2_PSE_PORT, *ib2);
+ nbq = pse_port == 1 ? 6 : 5;
+ *ib2 &= ~(AIROHA_FOE_IB2_NBQ | AIROHA_FOE_IB2_PSE_PORT |
+ AIROHA_FOE_IB2_PSE_QOS);
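+
+To make the wlan branch above concrete, a brief sketch with illustrative
+values (not part of the patch): for a flow bound to a wlan station on
+WDMA band 1, bss 2, wcid 0x10, airoha_ppe_foe_entry_prepare would pack
+
+	/* NBQ selects the WDMA block, PSE port CDM4 steers to wifi */
+	val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, 1) |
+	       FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, FE_PSE_PORT_CDM4);
+	/* the bss index travels in the qdata action field */
+	qdata |= FIELD_PREP(AIROHA_FOE_ACTDP, 2);
+	/* band and wcid ride in the l2 etype field of the FOE entry */
+	wlan_etype = FIELD_PREP(AIROHA_FOE_MAC_WDMA_BAND, 1) |
+		     FIELD_PREP(AIROHA_FOE_MAC_WDMA_WCID, 0x10);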
--- /dev/null
+From 524a43c3a0c17fa0a1223eea36751dcba55e5530 Mon Sep 17 00:00:00 2001
+Date: Sat, 23 Aug 2025 09:56:02 +0200
+Subject: [PATCH 1/3] net: airoha: Rely on airoha_eth struct in
+ airoha_ppe_flow_offload_cmd signature
+
+Rely on the airoha_eth struct in the airoha_ppe_flow_offload_cmd routine
+signature and in all the subroutines it calls, since these routines only
+used the port pointer to look up the airoha_eth struct.
+This is a preliminary patch to introduce flowtable offload for traffic
+received by the wlan NIC and forwarded to the ethernet one.
+
+Link: https://patch.msgid.link/20250823-airoha-en7581-wlan-rx-offload-v3-1-f78600ec3ed8@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -935,11 +935,10 @@ static int airoha_ppe_entry_idle_time(st
+ return airoha_ppe_get_entry_idle_time(ppe, e->data.ib1);
+ }
+
+-static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port,
++static int airoha_ppe_flow_offload_replace(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+ {
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+- struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ struct airoha_flow_data data = {};
+ struct net_device *odev = NULL;
+@@ -1136,10 +1135,9 @@ free_entry:
+ return err;
+ }
+
+-static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port,
++static int airoha_ppe_flow_offload_destroy(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+ {
+- struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+
+ e = rhashtable_lookup(&eth->flow_table, &f->cookie,
+@@ -1182,10 +1180,9 @@ void airoha_ppe_foe_entry_get_stats(stru
+ rcu_read_unlock();
+ }
+
+-static int airoha_ppe_flow_offload_stats(struct airoha_gdm_port *port,
++static int airoha_ppe_flow_offload_stats(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+ {
+- struct airoha_eth *eth = port->qdma->eth;
+ struct airoha_flow_table_entry *e;
+ u32 idle;
+
+@@ -1209,16 +1206,16 @@ static int airoha_ppe_flow_offload_stats
+ return 0;
+ }
+
+-static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port,
++static int airoha_ppe_flow_offload_cmd(struct airoha_eth *eth,
+ struct flow_cls_offload *f)
+ {
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+- return airoha_ppe_flow_offload_replace(port, f);
++ return airoha_ppe_flow_offload_replace(eth, f);
+ case FLOW_CLS_DESTROY:
+- return airoha_ppe_flow_offload_destroy(port, f);
++ return airoha_ppe_flow_offload_destroy(eth, f);
+ case FLOW_CLS_STATS:
+- return airoha_ppe_flow_offload_stats(port, f);
++ return airoha_ppe_flow_offload_stats(eth, f);
+ default:
+ break;
+ }
+@@ -1288,7 +1285,6 @@ error_npu_put:
+ int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
+ {
+ struct airoha_gdm_port *port = netdev_priv(dev);
+- struct flow_cls_offload *cls = type_data;
+ struct airoha_eth *eth = port->qdma->eth;
+ int err = 0;
+
+@@ -1297,7 +1293,7 @@ int airoha_ppe_setup_tc_block_cb(struct
+ if (!eth->npu)
+ err = airoha_ppe_offload_setup(eth);
+ if (!err)
+- err = airoha_ppe_flow_offload_cmd(port, cls);
++ err = airoha_ppe_flow_offload_cmd(eth, type_data);
+
+ mutex_unlock(&flow_offload_mutex);
+
--- /dev/null
+From f45fc18b6de04483643e8aa2ab97737abfe03d59 Mon Sep 17 00:00:00 2001
+Date: Sat, 23 Aug 2025 09:56:03 +0200
+Subject: [PATCH 2/3] net: airoha: Add airoha_ppe_dev struct definition
+
+Introduce the airoha_ppe_dev struct as a container for the PPE offload
+callbacks consumed by the MT76 driver when offloading flowtable traffic
+received by the wlan NIC and forwarded to the wired one.
+Add the airoha_ppe_setup_tc_block_cb routine to the PPE offload ops
+exposed to the MT76 driver and rely on an airoha_ppe_dev pointer in its
+signature.
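+
+A minimal consumer-side sketch (hypothetical wlan driver code, not part
+of this patch; error unwinding elided). The provider is looked up via an
+"airoha,eth" phandle in the consumer DT node and released with
+airoha_ppe_put_dev() on teardown:
+
+	struct airoha_ppe_dev *ppe_dev;
+	int err;
+
+	/* takes a module and a device reference on success; the stub
+	 * returns NULL when CONFIG_NET_AIROHA is disabled
+	 */
+	ppe_dev = airoha_ppe_get_dev(&pdev->dev);
+	if (IS_ERR_OR_NULL(ppe_dev))
+		return ppe_dev ? PTR_ERR(ppe_dev) : -EOPNOTSUPP;
+
+	/* forward a FLOW_CLS_* command to the PPE */
+	err = airoha_ppe_dev_setup_tc_block_cb(ppe_dev, cls_flower);
+
+	/* on teardown */
+	airoha_ppe_put_dev(ppe_dev);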
+
+Link: https://patch.msgid.link/20250823-airoha-en7581-wlan-rx-offload-v3-2-f78600ec3ed8@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 4 +-
+ drivers/net/ethernet/airoha/airoha_eth.h | 4 +-
+ drivers/net/ethernet/airoha/airoha_npu.c | 1 -
+ drivers/net/ethernet/airoha/airoha_ppe.c | 67 +++++++++++++++++++++--
+ include/linux/soc/airoha/airoha_offload.h | 35 ++++++++++++
+ 5 files changed, 104 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -2602,13 +2602,15 @@ static int airoha_dev_setup_tc_block_cb(
+ void *type_data, void *cb_priv)
+ {
+ struct net_device *dev = cb_priv;
++ struct airoha_gdm_port *port = netdev_priv(dev);
++ struct airoha_eth *eth = port->qdma->eth;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+- return airoha_ppe_setup_tc_block_cb(dev, type_data);
++ return airoha_ppe_setup_tc_block_cb(&eth->ppe->dev, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return airoha_dev_tc_matchall(dev, type_data);
+ default:
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -13,6 +13,7 @@
+ #include <linux/kernel.h>
+ #include <linux/netdevice.h>
+ #include <linux/reset.h>
++#include <linux/soc/airoha/airoha_offload.h>
+ #include <net/dsa.h>
+
+ #define AIROHA_MAX_NUM_GDM_PORTS 4
+@@ -546,6 +547,7 @@ struct airoha_gdm_port {
+ #define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0)
+
+ struct airoha_ppe {
++ struct airoha_ppe_dev dev;
+ struct airoha_eth *eth;
+
+ void *foe;
+@@ -622,7 +624,7 @@ bool airoha_is_valid_gdm_port(struct air
+
+ void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+ u16 hash);
+-int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data);
++int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
+ int airoha_ppe_init(struct airoha_eth *eth);
+ void airoha_ppe_deinit(struct airoha_eth *eth);
+ void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port);
+--- a/drivers/net/ethernet/airoha/airoha_npu.c
++++ b/drivers/net/ethernet/airoha/airoha_npu.c
+@@ -11,7 +11,6 @@
+ #include <linux/of_platform.h>
+ #include <linux/of_reserved_mem.h>
+ #include <linux/regmap.h>
+-#include <linux/soc/airoha/airoha_offload.h>
+
+ #include "airoha_eth.h"
+
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -6,8 +6,9 @@
+
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
+ #include <linux/rhashtable.h>
+-#include <linux/soc/airoha/airoha_offload.h>
+ #include <net/ipv6.h>
+ #include <net/pkt_cls.h>
+
+@@ -1282,10 +1283,10 @@ error_npu_put:
+ return err;
+ }
+
+-int airoha_ppe_setup_tc_block_cb(struct net_device *dev, void *type_data)
++int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data)
+ {
+- struct airoha_gdm_port *port = netdev_priv(dev);
+- struct airoha_eth *eth = port->qdma->eth;
++ struct airoha_ppe *ppe = dev->priv;
++ struct airoha_eth *eth = ppe->eth;
+ int err = 0;
+
+ mutex_lock(&flow_offload_mutex);
+@@ -1338,6 +1339,61 @@ void airoha_ppe_init_upd_mem(struct airo
+ PPE_UPDMEM_WR_MASK | PPE_UPDMEM_REQ_MASK);
+ }
+
++struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
++{
++ struct platform_device *pdev;
++ struct device_node *np;
++ struct airoha_eth *eth;
++
++ np = of_parse_phandle(dev->of_node, "airoha,eth", 0);
++ if (!np)
++ return ERR_PTR(-ENODEV);
++
++ pdev = of_find_device_by_node(np);
++ if (!pdev) {
++ dev_err(dev, "cannot find device node %s\n", np->name);
++ of_node_put(np);
++ return ERR_PTR(-ENODEV);
++ }
++ of_node_put(np);
++
++ if (!try_module_get(THIS_MODULE)) {
++ dev_err(dev, "failed to get the device driver module\n");
++ goto error_pdev_put;
++ }
++
++ eth = platform_get_drvdata(pdev);
++ if (!eth)
++ goto error_module_put;
++
++ if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) {
++ dev_err(&pdev->dev,
++ "failed to create device link to consumer %s\n",
++ dev_name(dev));
++ goto error_module_put;
++ }
++
++ return &eth->ppe->dev;
++
++error_module_put:
++ module_put(THIS_MODULE);
++error_pdev_put:
++ platform_device_put(pdev);
++
++ return ERR_PTR(-ENODEV);
++}
++EXPORT_SYMBOL_GPL(airoha_ppe_get_dev);
++
++void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
++{
++ struct airoha_ppe *ppe = dev->priv;
++ struct airoha_eth *eth = ppe->eth;
++
++ module_put(THIS_MODULE);
++ put_device(eth->dev);
++}
++EXPORT_SYMBOL_GPL(airoha_ppe_put_dev);
++
+ int airoha_ppe_init(struct airoha_eth *eth)
+ {
+ struct airoha_ppe *ppe;
+@@ -1347,6 +1403,9 @@ int airoha_ppe_init(struct airoha_eth *e
+ if (!ppe)
+ return -ENOMEM;
+
++ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
++ ppe->dev.priv = ppe;
++
+ foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+ ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma,
+ GFP_KERNEL);
+--- a/include/linux/soc/airoha/airoha_offload.h
++++ b/include/linux/soc/airoha/airoha_offload.h
+@@ -9,6 +9,41 @@
+ #include <linux/spinlock.h>
+ #include <linux/workqueue.h>
+
++struct airoha_ppe_dev {
++ struct {
++ int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
++ void *type_data);
++ } ops;
++
++ void *priv;
++};
++
++#if (IS_BUILTIN(CONFIG_NET_AIROHA) || IS_MODULE(CONFIG_NET_AIROHA))
++struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev);
++void airoha_ppe_put_dev(struct airoha_ppe_dev *dev);
++
++static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
++ void *type_data)
++{
++ return dev->ops.setup_tc_block_cb(dev, type_data);
++}
++#else
++static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
++{
++ return NULL;
++}
++
++static inline void airoha_ppe_put_dev(struct airoha_ppe_dev *dev)
++{
++}
++
++static inline int airoha_ppe_dev_setup_tc_block_cb(struct airoha_ppe_dev *dev,
++ void *type_data)
++{
++ return -EOPNOTSUPP;
++}
++#endif
++
+ #define NPU_NUM_CORES 8
+ #define NPU_NUM_IRQ 6
+ #define NPU_RX0_DESC_NUM 512
--- /dev/null
+From a7cc1aa151e3a9c0314b995f06102f7763d3bd71 Mon Sep 17 00:00:00 2001
+Date: Sat, 23 Aug 2025 09:56:04 +0200
+Subject: [PATCH 3/3] net: airoha: Introduce check_skb callback in ppe_dev ops
+
+Export the airoha_ppe_check_skb routine via the ppe_dev ops. The
+check_skb callback will be used by the MT76 driver in order to offload
+traffic received by the wlan NIC and forwarded to the ethernet one.
+Add a rx_wlan parameter to the airoha_ppe_check_skb routine signature so
+that the hw flow-stats update can be skipped for wlan-received traffic.
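+
+A hedged sketch of the intended wlan Rx hook (hypothetical consumer
+code; the descriptor fields used to recover reason/hash are
+driver-specific and only borrowed from the ethernet side here for
+illustration):
+
+	u32 reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+	u16 hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
+
+	if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+		/* rx_wlan = true: skip the hw flow-stats update */
+		airoha_ppe_dev_check_skb(ppe_dev, skb, hash, true);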
+
+Link: https://patch.msgid.link/20250823-airoha-en7581-wlan-rx-offload-v3-3-f78600ec3ed8@kernel.org
+---
+ drivers/net/ethernet/airoha/airoha_eth.c | 3 ++-
+ drivers/net/ethernet/airoha/airoha_eth.h | 8 ++------
+ drivers/net/ethernet/airoha/airoha_ppe.c | 25 +++++++++++++----------
+ include/linux/soc/airoha/airoha_offload.h | 20 ++++++++++++++++++
+ 4 files changed, 38 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/ethernet/airoha/airoha_eth.c
++++ b/drivers/net/ethernet/airoha/airoha_eth.c
+@@ -703,7 +703,8 @@ static int airoha_qdma_rx_process(struct
+
+ reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1);
+ if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+- airoha_ppe_check_skb(eth->ppe, q->skb, hash);
++ airoha_ppe_check_skb(&eth->ppe->dev, q->skb, hash,
++ false);
+
+ done++;
+ napi_gro_receive(&q->napi, q->skb);
+--- a/drivers/net/ethernet/airoha/airoha_eth.h
++++ b/drivers/net/ethernet/airoha/airoha_eth.h
+@@ -230,10 +230,6 @@ struct airoha_hw_stats {
+ };
+
+ enum {
+- PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+-};
+-
+-enum {
+ AIROHA_FOE_STATE_INVALID,
+ AIROHA_FOE_STATE_UNBIND,
+ AIROHA_FOE_STATE_BIND,
+@@ -622,8 +618,8 @@ static inline bool airhoa_is_lan_gdm_por
+ bool airoha_is_valid_gdm_port(struct airoha_eth *eth,
+ struct airoha_gdm_port *port);
+
+-void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+- u16 hash);
++void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
++ u16 hash, bool rx_wlan);
+ int airoha_ppe_setup_tc_block_cb(struct airoha_ppe_dev *dev, void *type_data);
+ int airoha_ppe_init(struct airoha_eth *eth);
+ void airoha_ppe_deinit(struct airoha_eth *eth);
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -616,7 +616,7 @@ static bool airoha_ppe_foe_compare_entry
+
+ static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe,
+ struct airoha_foe_entry *e,
+- u32 hash)
++ u32 hash, bool rx_wlan)
+ {
+ struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe);
+ u32 ts = airoha_ppe_get_timestamp(ppe);
+@@ -639,7 +639,8 @@ static int airoha_ppe_foe_commit_entry(s
+ goto unlock;
+ }
+
+- airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
++ if (!rx_wlan)
++ airoha_ppe_foe_flow_stats_update(ppe, npu, hwe, hash);
+
+ if (hash < PPE_SRAM_NUM_ENTRIES) {
+ dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe);
+@@ -665,7 +666,7 @@ static void airoha_ppe_foe_remove_flow(s
+ e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE;
+ e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE,
+ AIROHA_FOE_STATE_INVALID);
+- airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash);
++ airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash, false);
+ e->hash = 0xffff;
+ }
+ if (e->type == FLOW_TYPE_L2_SUBFLOW) {
+@@ -704,7 +705,7 @@ static void airoha_ppe_foe_flow_remove_e
+ static int
+ airoha_ppe_foe_commit_subflow_entry(struct airoha_ppe *ppe,
+ struct airoha_flow_table_entry *e,
+- u32 hash)
++ u32 hash, bool rx_wlan)
+ {
+ u32 mask = AIROHA_FOE_IB1_BIND_PACKET_TYPE | AIROHA_FOE_IB1_BIND_UDP;
+ struct airoha_foe_entry *hwe_p, hwe;
+@@ -745,14 +746,14 @@ airoha_ppe_foe_commit_subflow_entry(stru
+ }
+
+ hwe.bridge.data = e->data.bridge.data;
+- airoha_ppe_foe_commit_entry(ppe, &hwe, hash);
++ airoha_ppe_foe_commit_entry(ppe, &hwe, hash, rx_wlan);
+
+ return 0;
+ }
+
+ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ struct sk_buff *skb,
+- u32 hash)
++ u32 hash, bool rx_wlan)
+ {
+ struct airoha_flow_table_entry *e;
+ struct airoha_foe_bridge br = {};
+@@ -785,7 +786,7 @@ static void airoha_ppe_foe_insert_entry(
+ if (!airoha_ppe_foe_compare_entry(e, hwe))
+ continue;
+
+- airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
++ airoha_ppe_foe_commit_entry(ppe, &e->data, hash, rx_wlan);
+ commit_done = true;
+ e->hash = hash;
+ }
+@@ -797,7 +798,7 @@ static void airoha_ppe_foe_insert_entry(
+ e = rhashtable_lookup_fast(&ppe->l2_flows, &br,
+ airoha_l2_flow_table_params);
+ if (e)
+- airoha_ppe_foe_commit_subflow_entry(ppe, e, hash);
++ airoha_ppe_foe_commit_subflow_entry(ppe, e, hash, rx_wlan);
+ unlock:
+ spin_unlock_bh(&ppe_lock);
+ }
+@@ -1301,9 +1302,10 @@ int airoha_ppe_setup_tc_block_cb(struct
+ return err;
+ }
+
+-void airoha_ppe_check_skb(struct airoha_ppe *ppe, struct sk_buff *skb,
+- u16 hash)
++void airoha_ppe_check_skb(struct airoha_ppe_dev *dev, struct sk_buff *skb,
++ u16 hash, bool rx_wlan)
+ {
++ struct airoha_ppe *ppe = dev->priv;
+ u16 now, diff;
+
+ if (hash > PPE_HASH_MASK)
+@@ -1315,7 +1317,7 @@ void airoha_ppe_check_skb(struct airoha_
+ return;
+
+ ppe->foe_check_time[hash] = now;
+- airoha_ppe_foe_insert_entry(ppe, skb, hash);
++ airoha_ppe_foe_insert_entry(ppe, skb, hash, rx_wlan);
+ }
+
+ void airoha_ppe_init_upd_mem(struct airoha_gdm_port *port)
+@@ -1404,6 +1406,7 @@ int airoha_ppe_init(struct airoha_eth *e
+ return -ENOMEM;
+
+ ppe->dev.ops.setup_tc_block_cb = airoha_ppe_setup_tc_block_cb;
++ ppe->dev.ops.check_skb = airoha_ppe_check_skb;
+ ppe->dev.priv = ppe;
+
+ foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry);
+--- a/include/linux/soc/airoha/airoha_offload.h
++++ b/include/linux/soc/airoha/airoha_offload.h
+@@ -9,10 +9,17 @@
+ #include <linux/spinlock.h>
+ #include <linux/workqueue.h>
+
++enum {
++ PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
++};
++
+ struct airoha_ppe_dev {
+ struct {
+ int (*setup_tc_block_cb)(struct airoha_ppe_dev *dev,
+ void *type_data);
++ void (*check_skb)(struct airoha_ppe_dev *dev,
++ struct sk_buff *skb, u16 hash,
++ bool rx_wlan);
+ } ops;
+
+ void *priv;
+@@ -27,6 +34,13 @@ static inline int airoha_ppe_dev_setup_t
+ {
+ return dev->ops.setup_tc_block_cb(dev, type_data);
+ }
++
++static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
++ struct sk_buff *skb,
++ u16 hash, bool rx_wlan)
++{
++ dev->ops.check_skb(dev, skb, hash, rx_wlan);
++}
+ #else
+ static inline struct airoha_ppe_dev *airoha_ppe_get_dev(struct device *dev)
+ {
+@@ -42,6 +56,12 @@ static inline int airoha_ppe_dev_setup_tc_bl
+ {
+ return -EOPNOTSUPP;
+ }
++
++static inline void airoha_ppe_dev_check_skb(struct airoha_ppe_dev *dev,
++ struct sk_buff *skb, u16 hash,
++ bool rx_wlan)
++{
++}
+ #endif
+
+ #define NPU_NUM_CORES 8
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
-@@ -2064,11 +2064,14 @@ static int airoha_qdma_get_tx_ets_stats(
+@@ -2184,11 +2184,14 @@ static int airoha_qdma_get_tx_ets_stats(
static int airoha_tc_setup_qdisc_ets(struct airoha_gdm_port *port,
struct tc_ets_qopt_offload *opt)
{