generic: rename backport patches to names containing the kernel version
author    Mieczyslaw Nalewaj <[email protected]>
Sat, 15 Mar 2025 11:14:12 +0000 (12:14 +0100)
committer Hauke Mehrtens <[email protected]>
Sun, 30 Mar 2025 16:19:58 +0000 (18:19 +0200)
Backport patch names carry the upstream kernel version in which the change
first landed, so it is clear when a patch can be dropped during a kernel bump.

Rename 770-net-introduce-napi_is_scheduled-helper.patch
to 770-v6.7-net-introduce-napi_is_scheduled-helper.patch
because the change has been part of the kernel since 6.7
(https://lore.kernel.org/lkml/20231028011741.2400327[email protected]/).
Link: https://web.git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-6.7.y&id=7f3eb2174512fe6c9c0f062e96eccb0d3cc6d5cd

Rename 751-01-STABLE-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
to 751-01-v6.8-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
because the change has been part of the stable kernel since 6.8.12
(https://lore.kernel.org/lkml/2024053036-matron-confess-13e0@gregkh/).
Link: https://web.git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-6.8.y&id=b411384df5814fe6fd861d4869607577bcec73a1

Rename 751-02-STABLE-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
to 751-02-v6.8-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
because the change has been part of the stable kernel since 6.8.12
(https://lore.kernel.org/lkml/2024053036-matron-confess-13e0@gregkh/).
Link: https://web.git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-6.8.y&id=0849f56b5146b70f2da328b1d178d0a3c53d6846
Signed-off-by: Mieczyslaw Nalewaj <[email protected]>
Link: https://github.com/openwrt/openwrt/pull/18253
Signed-off-by: Hauke Mehrtens <[email protected]>
target/linux/generic/backport-6.6/751-01-STABLE-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch [deleted file]
target/linux/generic/backport-6.6/751-01-v6.8-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/751-02-STABLE-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch [deleted file]
target/linux/generic/backport-6.6/751-02-v6.8-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch [new file with mode: 0644]
target/linux/generic/backport-6.6/770-net-introduce-napi_is_scheduled-helper.patch [deleted file]
target/linux/generic/backport-6.6/770-v6.7-net-introduce-napi_is_scheduled-helper.patch [new file with mode: 0644]

diff --git a/target/linux/generic/backport-6.6/751-01-STABLE-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch b/target/linux/generic/backport-6.6/751-01-STABLE-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
deleted file mode 100644 (file)
index 22aceec..0000000
+++ /dev/null
@@ -1,605 +0,0 @@
-From 5d0fad48d2dec175ecb999974b94203c577973ef Mon Sep 17 00:00:00 2001
-From: Lorenzo Bianconi <[email protected]>
-Date: Wed, 8 May 2024 11:43:34 +0100
-Subject: [PATCH] net: ethernet: mediatek: split tx and rx fields in
- mtk_soc_data struct
-
-Split tx and rx fields in mtk_soc_data struct. This is a preliminary
-patch to roll back to ADMAv1 for MT7986 and MT7981 SoC in order to fix a
-hw hang if the device receives a corrupted packet when using ADMAv2.0.
-
-Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
-Signed-off-by: Lorenzo Bianconi <[email protected]>
-Signed-off-by: Daniel Golle <[email protected]>
-Reviewed-by: Przemek Kitszel <[email protected]>
-Link: https://lore.kernel.org/r/70a799b1f060ec2f57883e88ccb420ac0fb0abb5.1715164770.git.daniel@makrotopia.org
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
- drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
- 2 files changed, 139 insertions(+), 100 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1138,7 +1138,7 @@ static int mtk_init_fq_dma(struct mtk_et
-               eth->scratch_ring = eth->sram_base;
-       else
-               eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
--                                                     cnt * soc->txrx.txd_size,
-+                                                     cnt * soc->tx.desc_size,
-                                                      &eth->phy_scratch_ring,
-                                                      GFP_KERNEL);
-       if (unlikely(!eth->scratch_ring))
-@@ -1154,16 +1154,16 @@ static int mtk_init_fq_dma(struct mtk_et
-       if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-               return -ENOMEM;
--      phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
-+      phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
-       for (i = 0; i < cnt; i++) {
-               struct mtk_tx_dma_v2 *txd;
--              txd = eth->scratch_ring + i * soc->txrx.txd_size;
-+              txd = eth->scratch_ring + i * soc->tx.desc_size;
-               txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-               if (i < cnt - 1)
-                       txd->txd2 = eth->phy_scratch_ring +
--                                  (i + 1) * soc->txrx.txd_size;
-+                                  (i + 1) * soc->tx.desc_size;
-               txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-               txd->txd4 = 0;
-@@ -1412,7 +1412,7 @@ static int mtk_tx_map(struct sk_buff *sk
-       if (itxd == ring->last_free)
-               return -ENOMEM;
--      itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
-+      itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
-       memset(itx_buf, 0, sizeof(*itx_buf));
-       txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
-@@ -1453,7 +1453,7 @@ static int mtk_tx_map(struct sk_buff *sk
-                       memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
-                       txd_info.size = min_t(unsigned int, frag_size,
--                                            soc->txrx.dma_max_len);
-+                                            soc->tx.dma_max_len);
-                       txd_info.qid = queue;
-                       txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
-                                       !(frag_size - txd_info.size);
-@@ -1466,7 +1466,7 @@ static int mtk_tx_map(struct sk_buff *sk
-                       mtk_tx_set_dma_desc(dev, txd, &txd_info);
-                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
--                                                  soc->txrx.txd_size);
-+                                                  soc->tx.desc_size);
-                       if (new_desc)
-                               memset(tx_buf, 0, sizeof(*tx_buf));
-                       tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
-@@ -1509,7 +1509,7 @@ static int mtk_tx_map(struct sk_buff *sk
-       } else {
-               int next_idx;
--              next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
-+              next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
-                                        ring->dma_size);
-               mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
-       }
-@@ -1518,7 +1518,7 @@ static int mtk_tx_map(struct sk_buff *sk
- err_dma:
-       do {
--              tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
-+              tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
-               /* unmap dma */
-               mtk_tx_unmap(eth, tx_buf, NULL, false);
-@@ -1543,7 +1543,7 @@ static int mtk_cal_txd_req(struct mtk_et
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       nfrags += DIV_ROUND_UP(skb_frag_size(frag),
--                                             eth->soc->txrx.dma_max_len);
-+                                             eth->soc->tx.dma_max_len);
-               }
-       } else {
-               nfrags += skb_shinfo(skb)->nr_frags;
-@@ -1650,7 +1650,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
-               ring = &eth->rx_ring[i];
-               idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
--              rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
-+              rxd = ring->dma + idx * eth->soc->rx.desc_size;
-               if (rxd->rxd2 & RX_DMA_DONE) {
-                       ring->calc_idx_update = true;
-                       return ring;
-@@ -1818,7 +1818,7 @@ static int mtk_xdp_submit_frame(struct m
-       }
-       htxd = txd;
--      tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
-+      tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
-       memset(tx_buf, 0, sizeof(*tx_buf));
-       htx_buf = tx_buf;
-@@ -1837,7 +1837,7 @@ static int mtk_xdp_submit_frame(struct m
-                               goto unmap;
-                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
--                                                  soc->txrx.txd_size);
-+                                                  soc->tx.desc_size);
-                       memset(tx_buf, 0, sizeof(*tx_buf));
-                       n_desc++;
-               }
-@@ -1875,7 +1875,7 @@ static int mtk_xdp_submit_frame(struct m
-       } else {
-               int idx;
--              idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
-+              idx = txd_to_idx(ring, txd, soc->tx.desc_size);
-               mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
-                       MT7628_TX_CTX_IDX0);
-       }
-@@ -1886,7 +1886,7 @@ static int mtk_xdp_submit_frame(struct m
- unmap:
-       while (htxd != txd) {
--              tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
-+              tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
-               mtk_tx_unmap(eth, tx_buf, NULL, false);
-               htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-@@ -2017,7 +2017,7 @@ static int mtk_poll_rx(struct napi_struc
-                       goto rx_done;
-               idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
--              rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
-+              rxd = ring->dma + idx * eth->soc->rx.desc_size;
-               data = ring->data[idx];
-               if (!mtk_rx_get_desc(eth, &trxd, rxd))
-@@ -2152,7 +2152,7 @@ static int mtk_poll_rx(struct napi_struc
-                       rxdcsum = &trxd.rxd4;
-               }
--              if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
-+              if (*rxdcsum & eth->soc->rx.dma_l4_valid)
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else
-                       skb_checksum_none_assert(skb);
-@@ -2276,7 +2276,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
-                       break;
-               tx_buf = mtk_desc_to_tx_buf(ring, desc,
--                                          eth->soc->txrx.txd_size);
-+                                          eth->soc->tx.desc_size);
-               if (!tx_buf->data)
-                       break;
-@@ -2327,7 +2327,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
-               }
-               mtk_tx_unmap(eth, tx_buf, &bq, true);
--              desc = ring->dma + cpu * eth->soc->txrx.txd_size;
-+              desc = ring->dma + cpu * eth->soc->tx.desc_size;
-               ring->last_free = desc;
-               atomic_inc(&ring->free_count);
-@@ -2417,7 +2417,7 @@ static int mtk_napi_rx(struct napi_struc
-       do {
-               int rx_done;
--              mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
-+              mtk_w32(eth, eth->soc->rx.irq_done_mask,
-                       reg_map->pdma.irq_status);
-               rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
-               rx_done_total += rx_done;
-@@ -2433,10 +2433,10 @@ static int mtk_napi_rx(struct napi_struc
-                       return budget;
-       } while (mtk_r32(eth, reg_map->pdma.irq_status) &
--               eth->soc->txrx.rx_irq_done_mask);
-+               eth->soc->rx.irq_done_mask);
-       if (napi_complete_done(napi, rx_done_total))
--              mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
-+              mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
-       return rx_done_total;
- }
-@@ -2445,7 +2445,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- {
-       const struct mtk_soc_data *soc = eth->soc;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
--      int i, sz = soc->txrx.txd_size;
-+      int i, sz = soc->tx.desc_size;
-       struct mtk_tx_dma_v2 *txd;
-       int ring_size;
-       u32 ofs, val;
-@@ -2568,14 +2568,14 @@ static void mtk_tx_clean(struct mtk_eth
-       }
-       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
-               dma_free_coherent(eth->dma_dev,
--                                ring->dma_size * soc->txrx.txd_size,
-+                                ring->dma_size * soc->tx.desc_size,
-                                 ring->dma, ring->phys);
-               ring->dma = NULL;
-       }
-       if (ring->dma_pdma) {
-               dma_free_coherent(eth->dma_dev,
--                                ring->dma_size * soc->txrx.txd_size,
-+                                ring->dma_size * soc->tx.desc_size,
-                                 ring->dma_pdma, ring->phys_pdma);
-               ring->dma_pdma = NULL;
-       }
-@@ -2630,15 +2630,15 @@ static int mtk_rx_alloc(struct mtk_eth *
-       if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
-           rx_flag != MTK_RX_FLAGS_NORMAL) {
-               ring->dma = dma_alloc_coherent(eth->dma_dev,
--                                             rx_dma_size * eth->soc->txrx.rxd_size,
--                                             &ring->phys, GFP_KERNEL);
-+                              rx_dma_size * eth->soc->rx.desc_size,
-+                              &ring->phys, GFP_KERNEL);
-       } else {
-               struct mtk_tx_ring *tx_ring = &eth->tx_ring;
-               ring->dma = tx_ring->dma + tx_ring_size *
--                          eth->soc->txrx.txd_size * (ring_no + 1);
-+                          eth->soc->tx.desc_size * (ring_no + 1);
-               ring->phys = tx_ring->phys + tx_ring_size *
--                           eth->soc->txrx.txd_size * (ring_no + 1);
-+                           eth->soc->tx.desc_size * (ring_no + 1);
-       }
-       if (!ring->dma)
-@@ -2649,7 +2649,7 @@ static int mtk_rx_alloc(struct mtk_eth *
-               dma_addr_t dma_addr;
-               void *data;
--              rxd = ring->dma + i * eth->soc->txrx.rxd_size;
-+              rxd = ring->dma + i * eth->soc->rx.desc_size;
-               if (ring->page_pool) {
-                       data = mtk_page_pool_get_buff(ring->page_pool,
-                                                     &dma_addr, GFP_KERNEL);
-@@ -2740,7 +2740,7 @@ static void mtk_rx_clean(struct mtk_eth
-                       if (!ring->data[i])
-                               continue;
--                      rxd = ring->dma + i * eth->soc->txrx.rxd_size;
-+                      rxd = ring->dma + i * eth->soc->rx.desc_size;
-                       if (!rxd->rxd1)
-                               continue;
-@@ -2757,7 +2757,7 @@ static void mtk_rx_clean(struct mtk_eth
-       if (!in_sram && ring->dma) {
-               dma_free_coherent(eth->dma_dev,
--                                ring->dma_size * eth->soc->txrx.rxd_size,
-+                                ring->dma_size * eth->soc->rx.desc_size,
-                                 ring->dma, ring->phys);
-               ring->dma = NULL;
-       }
-@@ -3120,7 +3120,7 @@ static void mtk_dma_free(struct mtk_eth
-                       netdev_reset_queue(eth->netdev[i]);
-       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
-               dma_free_coherent(eth->dma_dev,
--                                MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
-+                                MTK_QDMA_RING_SIZE * soc->tx.desc_size,
-                                 eth->scratch_ring, eth->phy_scratch_ring);
-               eth->scratch_ring = NULL;
-               eth->phy_scratch_ring = 0;
-@@ -3170,7 +3170,7 @@ static irqreturn_t mtk_handle_irq_rx(int
-       eth->rx_events++;
-       if (likely(napi_schedule_prep(&eth->rx_napi))) {
--              mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
-+              mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
-               __napi_schedule(&eth->rx_napi);
-       }
-@@ -3196,9 +3196,9 @@ static irqreturn_t mtk_handle_irq(int ir
-       const struct mtk_reg_map *reg_map = eth->soc->reg_map;
-       if (mtk_r32(eth, reg_map->pdma.irq_mask) &
--          eth->soc->txrx.rx_irq_done_mask) {
-+          eth->soc->rx.irq_done_mask) {
-               if (mtk_r32(eth, reg_map->pdma.irq_status) &
--                  eth->soc->txrx.rx_irq_done_mask)
-+                  eth->soc->rx.irq_done_mask)
-                       mtk_handle_irq_rx(irq, _eth);
-       }
-       if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
-@@ -3216,10 +3216,10 @@ static void mtk_poll_controller(struct n
-       struct mtk_eth *eth = mac->hw;
-       mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
--      mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
-+      mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
-       mtk_handle_irq_rx(eth->irq[2], dev);
-       mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
--      mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
-+      mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
- }
- #endif
-@@ -3383,7 +3383,7 @@ static int mtk_open(struct net_device *d
-               napi_enable(&eth->tx_napi);
-               napi_enable(&eth->rx_napi);
-               mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
--              mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
-+              mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
-               refcount_set(&eth->dma_refcnt, 1);
-       }
-       else
-@@ -3467,7 +3467,7 @@ static int mtk_stop(struct net_device *d
-       mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
-       mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
--      mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
-+      mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
-       napi_disable(&eth->tx_napi);
-       napi_disable(&eth->rx_napi);
-@@ -3943,9 +3943,9 @@ static int mtk_hw_init(struct mtk_eth *e
-       /* FE int grouping */
-       mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
--      mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
-+      mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
-       mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
--      mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
-+      mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
-       mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
-       if (mtk_is_netsys_v3_or_greater(eth)) {
-@@ -5037,11 +5037,15 @@ static const struct mtk_soc_data mt2701_
-       .required_clks = MT7623_CLKS_BITMAP,
-       .required_pctl = true,
-       .version = 1,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma),
--              .rxd_size = sizeof(struct mtk_rx_dma),
--              .rx_irq_done_mask = MTK_RX_DONE_INT,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-+              .dma_l4_valid = RX_DMA_L4_VALID,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN,
-               .dma_len_offset = 16,
-       },
-@@ -5057,11 +5061,15 @@ static const struct mtk_soc_data mt7621_
-       .offload_version = 1,
-       .hash_offset = 2,
-       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma),
--              .rxd_size = sizeof(struct mtk_rx_dma),
--              .rx_irq_done_mask = MTK_RX_DONE_INT,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-+              .dma_l4_valid = RX_DMA_L4_VALID,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN,
-               .dma_len_offset = 16,
-       },
-@@ -5079,11 +5087,15 @@ static const struct mtk_soc_data mt7622_
-       .hash_offset = 2,
-       .has_accounting = true,
-       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma),
--              .rxd_size = sizeof(struct mtk_rx_dma),
--              .rx_irq_done_mask = MTK_RX_DONE_INT,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-+              .dma_l4_valid = RX_DMA_L4_VALID,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN,
-               .dma_len_offset = 16,
-       },
-@@ -5100,11 +5112,15 @@ static const struct mtk_soc_data mt7623_
-       .hash_offset = 2,
-       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-       .disable_pll_modes = true,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma),
--              .rxd_size = sizeof(struct mtk_rx_dma),
--              .rx_irq_done_mask = MTK_RX_DONE_INT,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-+              .dma_l4_valid = RX_DMA_L4_VALID,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN,
-               .dma_len_offset = 16,
-       },
-@@ -5119,11 +5135,15 @@ static const struct mtk_soc_data mt7629_
-       .required_pctl = false,
-       .has_accounting = true,
-       .version = 1,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma),
--              .rxd_size = sizeof(struct mtk_rx_dma),
--              .rx_irq_done_mask = MTK_RX_DONE_INT,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-+              .dma_l4_valid = RX_DMA_L4_VALID,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN,
-               .dma_len_offset = 16,
-       },
-@@ -5141,11 +5161,15 @@ static const struct mtk_soc_data mt7981_
-       .hash_offset = 4,
-       .has_accounting = true,
-       .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma_v2),
--              .rxd_size = sizeof(struct mtk_rx_dma_v2),
--              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma_v2),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-+              .dma_len_offset = 8,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma_v2),
-+              .irq_done_mask = MTK_RX_DONE_INT_V2,
-+              .dma_l4_valid = RX_DMA_L4_VALID_V2,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-               .dma_len_offset = 8,
-       },
-@@ -5163,11 +5187,15 @@ static const struct mtk_soc_data mt7986_
-       .hash_offset = 4,
-       .has_accounting = true,
-       .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma_v2),
--              .rxd_size = sizeof(struct mtk_rx_dma_v2),
--              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma_v2),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-+              .dma_len_offset = 8,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma_v2),
-+              .irq_done_mask = MTK_RX_DONE_INT_V2,
-+              .dma_l4_valid = RX_DMA_L4_VALID_V2,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-               .dma_len_offset = 8,
-       },
-@@ -5185,11 +5213,15 @@ static const struct mtk_soc_data mt7988_
-       .hash_offset = 4,
-       .has_accounting = true,
-       .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma_v2),
--              .rxd_size = sizeof(struct mtk_rx_dma_v2),
--              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma_v2),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-+              .dma_len_offset = 8,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma_v2),
-+              .irq_done_mask = MTK_RX_DONE_INT_V2,
-+              .dma_l4_valid = RX_DMA_L4_VALID_V2,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-               .dma_len_offset = 8,
-       },
-@@ -5202,11 +5234,15 @@ static const struct mtk_soc_data rt5350_
-       .required_clks = MT7628_CLKS_BITMAP,
-       .required_pctl = false,
-       .version = 1,
--      .txrx = {
--              .txd_size = sizeof(struct mtk_tx_dma),
--              .rxd_size = sizeof(struct mtk_rx_dma),
--              .rx_irq_done_mask = MTK_RX_DONE_INT,
--              .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
-+      .tx = {
-+              .desc_size = sizeof(struct mtk_tx_dma),
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-+      },
-+      .rx = {
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-+              .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
-               .dma_max_len = MTK_TX_DMA_BUF_LEN,
-               .dma_len_offset = 16,
-       },
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -327,8 +327,8 @@
- /* QDMA descriptor txd3 */
- #define TX_DMA_OWNER_CPU      BIT(31)
- #define TX_DMA_LS0            BIT(30)
--#define TX_DMA_PLEN0(x)               (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
--#define TX_DMA_PLEN1(x)               ((x) & eth->soc->txrx.dma_max_len)
-+#define TX_DMA_PLEN0(x)               (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
-+#define TX_DMA_PLEN1(x)               ((x) & eth->soc->tx.dma_max_len)
- #define TX_DMA_SWC            BIT(14)
- #define TX_DMA_PQID           GENMASK(3, 0)
- #define TX_DMA_ADDR64_MASK    GENMASK(3, 0)
-@@ -348,8 +348,8 @@
- /* QDMA descriptor rxd2 */
- #define RX_DMA_DONE           BIT(31)
- #define RX_DMA_LSO            BIT(30)
--#define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
--#define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
-+#define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
-+#define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
- #define RX_DMA_VTAG           BIT(15)
- #define RX_DMA_ADDR64_MASK    GENMASK(3, 0)
- #if IS_ENABLED(CONFIG_64BIT)
-@@ -1153,10 +1153,9 @@ struct mtk_reg_map {
-  * @foe_entry_size            Foe table entry size.
-  * @has_accounting            Bool indicating support for accounting of
-  *                            offloaded flows.
-- * @txd_size                  Tx DMA descriptor size.
-- * @rxd_size                  Rx DMA descriptor size.
-- * @rx_irq_done_mask          Rx irq done register mask.
-- * @rx_dma_l4_valid           Rx DMA valid register mask.
-+ * @desc_size                 Tx/Rx DMA descriptor size.
-+ * @irq_done_mask             Rx irq done register mask.
-+ * @dma_l4_valid              Rx DMA valid register mask.
-  * @dma_max_len                       Max DMA tx/rx buffer length.
-  * @dma_len_offset            Tx/Rx DMA length field offset.
-  */
-@@ -1174,13 +1173,17 @@ struct mtk_soc_data {
-       bool            has_accounting;
-       bool            disable_pll_modes;
-       struct {
--              u32     txd_size;
--              u32     rxd_size;
--              u32     rx_irq_done_mask;
--              u32     rx_dma_l4_valid;
-+              u32     desc_size;
-               u32     dma_max_len;
-               u32     dma_len_offset;
--      } txrx;
-+      } tx;
-+      struct {
-+              u32     desc_size;
-+              u32     irq_done_mask;
-+              u32     dma_l4_valid;
-+              u32     dma_max_len;
-+              u32     dma_len_offset;
-+      } rx;
- };
- #define MTK_DMA_MONITOR_TIMEOUT               msecs_to_jiffies(1000)
diff --git a/target/linux/generic/backport-6.6/751-01-v6.8-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch b/target/linux/generic/backport-6.6/751-01-v6.8-net-ethernet-mediatek-split-tx-and-rx-fields-in-mtk_.patch
new file mode 100644 (file)
index 0000000..22aceec
--- /dev/null
@@ -0,0 +1,605 @@
+From 5d0fad48d2dec175ecb999974b94203c577973ef Mon Sep 17 00:00:00 2001
+From: Lorenzo Bianconi <[email protected]>
+Date: Wed, 8 May 2024 11:43:34 +0100
+Subject: [PATCH] net: ethernet: mediatek: split tx and rx fields in
+ mtk_soc_data struct
+
+Split tx and rx fields in mtk_soc_data struct. This is a preliminary
+patch to roll back to ADMAv1 for MT7986 and MT7981 SoC in order to fix a
+hw hang if the device receives a corrupted packet when using ADMAv2.0.
+
+Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
+Signed-off-by: Lorenzo Bianconi <[email protected]>
+Signed-off-by: Daniel Golle <[email protected]>
+Reviewed-by: Przemek Kitszel <[email protected]>
+Link: https://lore.kernel.org/r/70a799b1f060ec2f57883e88ccb420ac0fb0abb5.1715164770.git.daniel@makrotopia.org
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h |  29 +--
+ 2 files changed, 139 insertions(+), 100 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1138,7 +1138,7 @@ static int mtk_init_fq_dma(struct mtk_et
+               eth->scratch_ring = eth->sram_base;
+       else
+               eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+-                                                     cnt * soc->txrx.txd_size,
++                                                     cnt * soc->tx.desc_size,
+                                                      &eth->phy_scratch_ring,
+                                                      GFP_KERNEL);
+       if (unlikely(!eth->scratch_ring))
+@@ -1154,16 +1154,16 @@ static int mtk_init_fq_dma(struct mtk_et
+       if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+               return -ENOMEM;
+-      phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++      phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+       for (i = 0; i < cnt; i++) {
+               struct mtk_tx_dma_v2 *txd;
+-              txd = eth->scratch_ring + i * soc->txrx.txd_size;
++              txd = eth->scratch_ring + i * soc->tx.desc_size;
+               txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+               if (i < cnt - 1)
+                       txd->txd2 = eth->phy_scratch_ring +
+-                                  (i + 1) * soc->txrx.txd_size;
++                                  (i + 1) * soc->tx.desc_size;
+               txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+               txd->txd4 = 0;
+@@ -1412,7 +1412,7 @@ static int mtk_tx_map(struct sk_buff *sk
+       if (itxd == ring->last_free)
+               return -ENOMEM;
+-      itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++      itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+       memset(itx_buf, 0, sizeof(*itx_buf));
+       txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1453,7 +1453,7 @@ static int mtk_tx_map(struct sk_buff *sk
+                       memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+                       txd_info.size = min_t(unsigned int, frag_size,
+-                                            soc->txrx.dma_max_len);
++                                            soc->tx.dma_max_len);
+                       txd_info.qid = queue;
+                       txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+                                       !(frag_size - txd_info.size);
+@@ -1466,7 +1466,7 @@ static int mtk_tx_map(struct sk_buff *sk
+                       mtk_tx_set_dma_desc(dev, txd, &txd_info);
+                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-                                                  soc->txrx.txd_size);
++                                                  soc->tx.desc_size);
+                       if (new_desc)
+                               memset(tx_buf, 0, sizeof(*tx_buf));
+                       tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1509,7 +1509,7 @@ static int mtk_tx_map(struct sk_buff *sk
+       } else {
+               int next_idx;
+-              next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++              next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+                                        ring->dma_size);
+               mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+       }
+@@ -1518,7 +1518,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ err_dma:
+       do {
+-              tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++              tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+               /* unmap dma */
+               mtk_tx_unmap(eth, tx_buf, NULL, false);
+@@ -1543,7 +1543,7 @@ static int mtk_cal_txd_req(struct mtk_et
+               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+                       frag = &skb_shinfo(skb)->frags[i];
+                       nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+-                                             eth->soc->txrx.dma_max_len);
++                                             eth->soc->tx.dma_max_len);
+               }
+       } else {
+               nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1650,7 +1650,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+               ring = &eth->rx_ring[i];
+               idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-              rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++              rxd = ring->dma + idx * eth->soc->rx.desc_size;
+               if (rxd->rxd2 & RX_DMA_DONE) {
+                       ring->calc_idx_update = true;
+                       return ring;
+@@ -1818,7 +1818,7 @@ static int mtk_xdp_submit_frame(struct m
+       }
+       htxd = txd;
+-      tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++      tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+       memset(tx_buf, 0, sizeof(*tx_buf));
+       htx_buf = tx_buf;
+@@ -1837,7 +1837,7 @@ static int mtk_xdp_submit_frame(struct m
+                               goto unmap;
+                       tx_buf = mtk_desc_to_tx_buf(ring, txd,
+-                                                  soc->txrx.txd_size);
++                                                  soc->tx.desc_size);
+                       memset(tx_buf, 0, sizeof(*tx_buf));
+                       n_desc++;
+               }
+@@ -1875,7 +1875,7 @@ static int mtk_xdp_submit_frame(struct m
+       } else {
+               int idx;
+-              idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++              idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+               mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+                       MT7628_TX_CTX_IDX0);
+       }
+@@ -1886,7 +1886,7 @@ static int mtk_xdp_submit_frame(struct m
+ unmap:
+       while (htxd != txd) {
+-              tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++              tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+               mtk_tx_unmap(eth, tx_buf, NULL, false);
+               htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -2017,7 +2017,7 @@ static int mtk_poll_rx(struct napi_struc
+                       goto rx_done;
+               idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+-              rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++              rxd = ring->dma + idx * eth->soc->rx.desc_size;
+               data = ring->data[idx];
+               if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2152,7 +2152,7 @@ static int mtk_poll_rx(struct napi_struc
+                       rxdcsum = &trxd.rxd4;
+               }
+-              if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++              if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+               else
+                       skb_checksum_none_assert(skb);
+@@ -2276,7 +2276,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+                       break;
+               tx_buf = mtk_desc_to_tx_buf(ring, desc,
+-                                          eth->soc->txrx.txd_size);
++                                          eth->soc->tx.desc_size);
+               if (!tx_buf->data)
+                       break;
+@@ -2327,7 +2327,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+               }
+               mtk_tx_unmap(eth, tx_buf, &bq, true);
+-              desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++              desc = ring->dma + cpu * eth->soc->tx.desc_size;
+               ring->last_free = desc;
+               atomic_inc(&ring->free_count);
+@@ -2417,7 +2417,7 @@ static int mtk_napi_rx(struct napi_struc
+       do {
+               int rx_done;
+-              mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++              mtk_w32(eth, eth->soc->rx.irq_done_mask,
+                       reg_map->pdma.irq_status);
+               rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+               rx_done_total += rx_done;
+@@ -2433,10 +2433,10 @@ static int mtk_napi_rx(struct napi_struc
+                       return budget;
+       } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+-               eth->soc->txrx.rx_irq_done_mask);
++               eth->soc->rx.irq_done_mask);
+       if (napi_complete_done(napi, rx_done_total))
+-              mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++              mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+       return rx_done_total;
+ }
+@@ -2445,7 +2445,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+       const struct mtk_soc_data *soc = eth->soc;
+       struct mtk_tx_ring *ring = &eth->tx_ring;
+-      int i, sz = soc->txrx.txd_size;
++      int i, sz = soc->tx.desc_size;
+       struct mtk_tx_dma_v2 *txd;
+       int ring_size;
+       u32 ofs, val;
+@@ -2568,14 +2568,14 @@ static void mtk_tx_clean(struct mtk_eth
+       }
+       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+               dma_free_coherent(eth->dma_dev,
+-                                ring->dma_size * soc->txrx.txd_size,
++                                ring->dma_size * soc->tx.desc_size,
+                                 ring->dma, ring->phys);
+               ring->dma = NULL;
+       }
+       if (ring->dma_pdma) {
+               dma_free_coherent(eth->dma_dev,
+-                                ring->dma_size * soc->txrx.txd_size,
++                                ring->dma_size * soc->tx.desc_size,
+                                 ring->dma_pdma, ring->phys_pdma);
+               ring->dma_pdma = NULL;
+       }
+@@ -2630,15 +2630,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+       if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+           rx_flag != MTK_RX_FLAGS_NORMAL) {
+               ring->dma = dma_alloc_coherent(eth->dma_dev,
+-                                             rx_dma_size * eth->soc->txrx.rxd_size,
+-                                             &ring->phys, GFP_KERNEL);
++                              rx_dma_size * eth->soc->rx.desc_size,
++                              &ring->phys, GFP_KERNEL);
+       } else {
+               struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+               ring->dma = tx_ring->dma + tx_ring_size *
+-                          eth->soc->txrx.txd_size * (ring_no + 1);
++                          eth->soc->tx.desc_size * (ring_no + 1);
+               ring->phys = tx_ring->phys + tx_ring_size *
+-                           eth->soc->txrx.txd_size * (ring_no + 1);
++                           eth->soc->tx.desc_size * (ring_no + 1);
+       }
+       if (!ring->dma)
+@@ -2649,7 +2649,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+               dma_addr_t dma_addr;
+               void *data;
+-              rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++              rxd = ring->dma + i * eth->soc->rx.desc_size;
+               if (ring->page_pool) {
+                       data = mtk_page_pool_get_buff(ring->page_pool,
+                                                     &dma_addr, GFP_KERNEL);
+@@ -2740,7 +2740,7 @@ static void mtk_rx_clean(struct mtk_eth
+                       if (!ring->data[i])
+                               continue;
+-                      rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++                      rxd = ring->dma + i * eth->soc->rx.desc_size;
+                       if (!rxd->rxd1)
+                               continue;
+@@ -2757,7 +2757,7 @@ static void mtk_rx_clean(struct mtk_eth
+       if (!in_sram && ring->dma) {
+               dma_free_coherent(eth->dma_dev,
+-                                ring->dma_size * eth->soc->txrx.rxd_size,
++                                ring->dma_size * eth->soc->rx.desc_size,
+                                 ring->dma, ring->phys);
+               ring->dma = NULL;
+       }
+@@ -3120,7 +3120,7 @@ static void mtk_dma_free(struct mtk_eth
+                       netdev_reset_queue(eth->netdev[i]);
+       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+               dma_free_coherent(eth->dma_dev,
+-                                MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++                                MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+                                 eth->scratch_ring, eth->phy_scratch_ring);
+               eth->scratch_ring = NULL;
+               eth->phy_scratch_ring = 0;
+@@ -3170,7 +3170,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+       eth->rx_events++;
+       if (likely(napi_schedule_prep(&eth->rx_napi))) {
+-              mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++              mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+               __napi_schedule(&eth->rx_napi);
+       }
+@@ -3196,9 +3196,9 @@ static irqreturn_t mtk_handle_irq(int ir
+       const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+       if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+-          eth->soc->txrx.rx_irq_done_mask) {
++          eth->soc->rx.irq_done_mask) {
+               if (mtk_r32(eth, reg_map->pdma.irq_status) &
+-                  eth->soc->txrx.rx_irq_done_mask)
++                  eth->soc->rx.irq_done_mask)
+                       mtk_handle_irq_rx(irq, _eth);
+       }
+       if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3216,10 +3216,10 @@ static void mtk_poll_controller(struct n
+       struct mtk_eth *eth = mac->hw;
+       mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-      mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++      mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+       mtk_handle_irq_rx(eth->irq[2], dev);
+       mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-      mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++      mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+@@ -3383,7 +3383,7 @@ static int mtk_open(struct net_device *d
+               napi_enable(&eth->tx_napi);
+               napi_enable(&eth->rx_napi);
+               mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+-              mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++              mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+               refcount_set(&eth->dma_refcnt, 1);
+       }
+       else
+@@ -3467,7 +3467,7 @@ static int mtk_stop(struct net_device *d
+       mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+       mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+-      mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++      mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+       napi_disable(&eth->tx_napi);
+       napi_disable(&eth->rx_napi);
+@@ -3943,9 +3943,9 @@ static int mtk_hw_init(struct mtk_eth *e
+       /* FE int grouping */
+       mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+-      mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++      mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+       mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+-      mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++      mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+       mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+       if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -5037,11 +5037,15 @@ static const struct mtk_soc_data mt2701_
+       .required_clks = MT7623_CLKS_BITMAP,
+       .required_pctl = true,
+       .version = 1,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5057,11 +5061,15 @@ static const struct mtk_soc_data mt7621_
+       .offload_version = 1,
+       .hash_offset = 2,
+       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5079,11 +5087,15 @@ static const struct mtk_soc_data mt7622_
+       .hash_offset = 2,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5100,11 +5112,15 @@ static const struct mtk_soc_data mt7623_
+       .hash_offset = 2,
+       .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+       .disable_pll_modes = true,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5119,11 +5135,15 @@ static const struct mtk_soc_data mt7629_
+       .required_pctl = false,
+       .has_accounting = true,
+       .version = 1,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+@@ -5141,11 +5161,15 @@ static const struct mtk_soc_data mt7981_
+       .hash_offset = 4,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma_v2),
+-              .rxd_size = sizeof(struct mtk_rx_dma_v2),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma_v2),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++              .dma_len_offset = 8,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma_v2),
++              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+@@ -5163,11 +5187,15 @@ static const struct mtk_soc_data mt7986_
+       .hash_offset = 4,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma_v2),
+-              .rxd_size = sizeof(struct mtk_rx_dma_v2),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma_v2),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++              .dma_len_offset = 8,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma_v2),
++              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+@@ -5185,11 +5213,15 @@ static const struct mtk_soc_data mt7988_
+       .hash_offset = 4,
+       .has_accounting = true,
+       .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma_v2),
+-              .rxd_size = sizeof(struct mtk_rx_dma_v2),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma_v2),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++              .dma_len_offset = 8,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma_v2),
++              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+@@ -5202,11 +5234,15 @@ static const struct mtk_soc_data rt5350_
+       .required_clks = MT7628_CLKS_BITMAP,
+       .required_pctl = false,
+       .version = 1,
+-      .txrx = {
+-              .txd_size = sizeof(struct mtk_tx_dma),
+-              .rxd_size = sizeof(struct mtk_rx_dma),
+-              .rx_irq_done_mask = MTK_RX_DONE_INT,
+-              .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++      .tx = {
++              .desc_size = sizeof(struct mtk_tx_dma),
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
++      },
++      .rx = {
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
++              .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -327,8 +327,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU      BIT(31)
+ #define TX_DMA_LS0            BIT(30)
+-#define TX_DMA_PLEN0(x)               (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x)               ((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x)               (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x)               ((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC            BIT(14)
+ #define TX_DMA_PQID           GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK    GENMASK(3, 0)
+@@ -348,8 +348,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE           BIT(31)
+ #define RX_DMA_LSO            BIT(30)
+-#define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG           BIT(15)
+ #define RX_DMA_ADDR64_MASK    GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1153,10 +1153,9 @@ struct mtk_reg_map {
+  * @foe_entry_size            Foe table entry size.
+  * @has_accounting            Bool indicating support for accounting of
+  *                            offloaded flows.
+- * @txd_size                  Tx DMA descriptor size.
+- * @rxd_size                  Rx DMA descriptor size.
+- * @rx_irq_done_mask          Rx irq done register mask.
+- * @rx_dma_l4_valid           Rx DMA valid register mask.
++ * @desc_size                 Tx/Rx DMA descriptor size.
++ * @irq_done_mask             Rx irq done register mask.
++ * @dma_l4_valid              Rx DMA valid register mask.
+  * @dma_max_len                       Max DMA tx/rx buffer length.
+  * @dma_len_offset            Tx/Rx DMA length field offset.
+  */
+@@ -1174,13 +1173,17 @@ struct mtk_soc_data {
+       bool            has_accounting;
+       bool            disable_pll_modes;
+       struct {
+-              u32     txd_size;
+-              u32     rxd_size;
+-              u32     rx_irq_done_mask;
+-              u32     rx_dma_l4_valid;
++              u32     desc_size;
+               u32     dma_max_len;
+               u32     dma_len_offset;
+-      } txrx;
++      } tx;
++      struct {
++              u32     desc_size;
++              u32     irq_done_mask;
++              u32     dma_l4_valid;
++              u32     dma_max_len;
++              u32     dma_len_offset;
++      } rx;
+ };
+ #define MTK_DMA_MONITOR_TIMEOUT               msecs_to_jiffies(1000)
diff --git a/target/linux/generic/backport-6.6/751-02-STABLE-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch b/target/linux/generic/backport-6.6/751-02-STABLE-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
deleted file mode 100644 (file)
index e71ff09..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-From 4d572e867bdb372bb4add39a0fa495c6a9c9a8da Mon Sep 17 00:00:00 2001
-From: Daniel Golle <[email protected]>
-Date: Wed, 8 May 2024 11:43:56 +0100
-Subject: [PATCH] net: ethernet: mediatek: use ADMAv1 instead of ADMAv2.0 on
- MT7981 and MT7986
-
-ADMAv2.0 is plagued by RX hangs which can't easily be detected and which
-happen upon receipt of a corrupted Ethernet frame.
-
-Use ADMAv1 instead, which is also still present and usable, and doesn't
-suffer from that problem.
-
-Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
-Co-developed-by: Lorenzo Bianconi <[email protected]>
-Signed-off-by: Lorenzo Bianconi <[email protected]>
-Signed-off-by: Daniel Golle <[email protected]>
-Link: https://lore.kernel.org/r/57cef74bbd0c243366ad1ff4221e3f72f437ec80.1715164770.git.daniel@makrotopia.org
-Signed-off-by: Jakub Kicinski <[email protected]>
----
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
- 1 file changed, 23 insertions(+), 23 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_r
-       .tx_irq_mask            = 0x461c,
-       .tx_irq_status          = 0x4618,
-       .pdma = {
--              .rx_ptr         = 0x6100,
--              .rx_cnt_cfg     = 0x6104,
--              .pcrx_ptr       = 0x6108,
--              .glo_cfg        = 0x6204,
--              .rst_idx        = 0x6208,
--              .delay_irq      = 0x620c,
--              .irq_status     = 0x6220,
--              .irq_mask       = 0x6228,
--              .adma_rx_dbg0   = 0x6238,
--              .int_grp        = 0x6250,
-+              .rx_ptr         = 0x4100,
-+              .rx_cnt_cfg     = 0x4104,
-+              .pcrx_ptr       = 0x4108,
-+              .glo_cfg        = 0x4204,
-+              .rst_idx        = 0x4208,
-+              .delay_irq      = 0x420c,
-+              .irq_status     = 0x4220,
-+              .irq_mask       = 0x4228,
-+              .adma_rx_dbg0   = 0x4238,
-+              .int_grp        = 0x4250,
-       },
-       .qdma = {
-               .qtx_cfg        = 0x4400,
-@@ -1106,7 +1106,7 @@ static bool mtk_rx_get_desc(struct mtk_e
-       rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
-       rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
-       rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
--      if (mtk_is_netsys_v2_or_greater(eth)) {
-+      if (mtk_is_netsys_v3_or_greater(eth)) {
-               rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
-               rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
-       }
-@@ -2024,7 +2024,7 @@ static int mtk_poll_rx(struct napi_struc
-                       break;
-               /* find out which mac the packet come from. values start at 1 */
--              if (mtk_is_netsys_v2_or_greater(eth)) {
-+              if (mtk_is_netsys_v3_or_greater(eth)) {
-                       u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
-                       switch (val) {
-@@ -2136,7 +2136,7 @@ static int mtk_poll_rx(struct napi_struc
-               skb->dev = netdev;
-               bytes += skb->len;
--              if (mtk_is_netsys_v2_or_greater(eth)) {
-+              if (mtk_is_netsys_v3_or_greater(eth)) {
-                       reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
-                       hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
-                       if (hash != MTK_RXD5_FOE_ENTRY)
-@@ -2686,7 +2686,7 @@ static int mtk_rx_alloc(struct mtk_eth *
-               rxd->rxd3 = 0;
-               rxd->rxd4 = 0;
--              if (mtk_is_netsys_v2_or_greater(eth)) {
-+              if (mtk_is_netsys_v3_or_greater(eth)) {
-                       rxd->rxd5 = 0;
-                       rxd->rxd6 = 0;
-                       rxd->rxd7 = 0;
-@@ -3889,7 +3889,7 @@ static int mtk_hw_init(struct mtk_eth *e
-       else
-               mtk_hw_reset(eth);
--      if (mtk_is_netsys_v2_or_greater(eth)) {
-+      if (mtk_is_netsys_v3_or_greater(eth)) {
-               /* Set FE to PDMAv2 if necessary */
-               val = mtk_r32(eth, MTK_FE_GLO_MISC);
-               mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
-@@ -5167,11 +5167,11 @@ static const struct mtk_soc_data mt7981_
-               .dma_len_offset = 8,
-       },
-       .rx = {
--              .desc_size = sizeof(struct mtk_rx_dma_v2),
--              .irq_done_mask = MTK_RX_DONE_INT_V2,
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-               .dma_l4_valid = RX_DMA_L4_VALID_V2,
--              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
--              .dma_len_offset = 8,
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-       },
- };
-@@ -5193,11 +5193,11 @@ static const struct mtk_soc_data mt7986_
-               .dma_len_offset = 8,
-       },
-       .rx = {
--              .desc_size = sizeof(struct mtk_rx_dma_v2),
--              .irq_done_mask = MTK_RX_DONE_INT_V2,
-+              .desc_size = sizeof(struct mtk_rx_dma),
-+              .irq_done_mask = MTK_RX_DONE_INT,
-               .dma_l4_valid = RX_DMA_L4_VALID_V2,
--              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
--              .dma_len_offset = 8,
-+              .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+              .dma_len_offset = 16,
-       },
- };
diff --git a/target/linux/generic/backport-6.6/751-02-v6.8-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch b/target/linux/generic/backport-6.6/751-02-v6.8-net-ethernet-mediatek-use-QDMA-instead-of-ADMAv2-on-.patch
new file mode 100644 (file)
index 0000000..e71ff09
--- /dev/null
@@ -0,0 +1,128 @@
+From 4d572e867bdb372bb4add39a0fa495c6a9c9a8da Mon Sep 17 00:00:00 2001
+From: Daniel Golle <[email protected]>
+Date: Wed, 8 May 2024 11:43:56 +0100
+Subject: [PATCH] net: ethernet: mediatek: use ADMAv1 instead of ADMAv2.0 on
+ MT7981 and MT7986
+
+ADMAv2.0 is plagued by RX hangs which can't easily be detected and which
+happen upon receipt of a corrupted Ethernet frame.
+
+Use ADMAv1 instead, which is also still present and usable, and doesn't
+suffer from that problem.
+
+Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
+Co-developed-by: Lorenzo Bianconi <[email protected]>
+Signed-off-by: Lorenzo Bianconi <[email protected]>
+Signed-off-by: Daniel Golle <[email protected]>
+Link: https://lore.kernel.org/r/57cef74bbd0c243366ad1ff4221e3f72f437ec80.1715164770.git.daniel@makrotopia.org
+Signed-off-by: Jakub Kicinski <[email protected]>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_r
+       .tx_irq_mask            = 0x461c,
+       .tx_irq_status          = 0x4618,
+       .pdma = {
+-              .rx_ptr         = 0x6100,
+-              .rx_cnt_cfg     = 0x6104,
+-              .pcrx_ptr       = 0x6108,
+-              .glo_cfg        = 0x6204,
+-              .rst_idx        = 0x6208,
+-              .delay_irq      = 0x620c,
+-              .irq_status     = 0x6220,
+-              .irq_mask       = 0x6228,
+-              .adma_rx_dbg0   = 0x6238,
+-              .int_grp        = 0x6250,
++              .rx_ptr         = 0x4100,
++              .rx_cnt_cfg     = 0x4104,
++              .pcrx_ptr       = 0x4108,
++              .glo_cfg        = 0x4204,
++              .rst_idx        = 0x4208,
++              .delay_irq      = 0x420c,
++              .irq_status     = 0x4220,
++              .irq_mask       = 0x4228,
++              .adma_rx_dbg0   = 0x4238,
++              .int_grp        = 0x4250,
+       },
+       .qdma = {
+               .qtx_cfg        = 0x4400,
+@@ -1106,7 +1106,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+       rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+       rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+       rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+-      if (mtk_is_netsys_v2_or_greater(eth)) {
++      if (mtk_is_netsys_v3_or_greater(eth)) {
+               rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+               rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+       }
+@@ -2024,7 +2024,7 @@ static int mtk_poll_rx(struct napi_struc
+                       break;
+               /* find out which mac the packet come from. values start at 1 */
+-              if (mtk_is_netsys_v2_or_greater(eth)) {
++              if (mtk_is_netsys_v3_or_greater(eth)) {
+                       u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+                       switch (val) {
+@@ -2136,7 +2136,7 @@ static int mtk_poll_rx(struct napi_struc
+               skb->dev = netdev;
+               bytes += skb->len;
+-              if (mtk_is_netsys_v2_or_greater(eth)) {
++              if (mtk_is_netsys_v3_or_greater(eth)) {
+                       reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+                       hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+                       if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2686,7 +2686,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+               rxd->rxd3 = 0;
+               rxd->rxd4 = 0;
+-              if (mtk_is_netsys_v2_or_greater(eth)) {
++              if (mtk_is_netsys_v3_or_greater(eth)) {
+                       rxd->rxd5 = 0;
+                       rxd->rxd6 = 0;
+                       rxd->rxd7 = 0;
+@@ -3889,7 +3889,7 @@ static int mtk_hw_init(struct mtk_eth *e
+       else
+               mtk_hw_reset(eth);
+-      if (mtk_is_netsys_v2_or_greater(eth)) {
++      if (mtk_is_netsys_v3_or_greater(eth)) {
+               /* Set FE to PDMAv2 if necessary */
+               val = mtk_r32(eth, MTK_FE_GLO_MISC);
+               mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
+@@ -5167,11 +5167,11 @@ static const struct mtk_soc_data mt7981_
+               .dma_len_offset = 8,
+       },
+       .rx = {
+-              .desc_size = sizeof(struct mtk_rx_dma_v2),
+-              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
+-              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+-              .dma_len_offset = 8,
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
+       },
+ };
+@@ -5193,11 +5193,11 @@ static const struct mtk_soc_data mt7986_
+               .dma_len_offset = 8,
+       },
+       .rx = {
+-              .desc_size = sizeof(struct mtk_rx_dma_v2),
+-              .irq_done_mask = MTK_RX_DONE_INT_V2,
++              .desc_size = sizeof(struct mtk_rx_dma),
++              .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
+-              .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+-              .dma_len_offset = 8,
++              .dma_max_len = MTK_TX_DMA_BUF_LEN,
++              .dma_len_offset = 16,
+       },
+ };
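
Two coupled changes ride in the 751-02 patch above: the mt7986 PDMA
register window moves from the ADMAv2 block at 0x6xxx back to the legacy
block at 0x4xxx, and the mtk_is_netsys_v2_or_greater() gates guarding the
extended rxd5..rxd7 descriptor words are tightened to
mtk_is_netsys_v3_or_greater(), since NETSYS v2 SoCs (MT7981/MT7986) now
run the v1 descriptor format. A hedged sketch of that gating with a
stand-in version enum (the real driver derives the generation from SoC
data, not a plain struct field):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the driver's notion of NETSYS generation. */
    enum netsys_version { NETSYS_V1 = 1, NETSYS_V2, NETSYS_V3 };

    struct eth { enum netsys_version version; };

    static bool mtk_is_netsys_v3_or_greater(const struct eth *eth)
    {
            return eth->version >= NETSYS_V3;
    }

    /* After the patch, only NETSYS v3 hardware keeps the ADMAv2
     * descriptor layout with the extra rxd5..rxd7 words; v2 SoCs
     * fall back to the v1 layout alongside the 0x4xxx PDMA window. */
    static void mtk_rx_desc_init(const struct eth *eth)
    {
            /* rxd1..rxd4 are cleared unconditionally (not shown) */
            if (mtk_is_netsys_v3_or_greater(eth))
                    puts("v2-style descriptor: also clear rxd5..rxd7");
            else
                    puts("v1-style descriptor: no extended words");
    }

    int main(void)
    {
            struct eth mt7986 = { .version = NETSYS_V2 };
            struct eth mt7988 = { .version = NETSYS_V3 };

            mtk_rx_desc_init(&mt7986); /* takes the v1 path after the revert */
            mtk_rx_desc_init(&mt7988); /* unaffected, keeps ADMAv2 */
            return 0;
    }
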
diff --git a/target/linux/generic/backport-6.6/770-net-introduce-napi_is_scheduled-helper.patch b/target/linux/generic/backport-6.6/770-net-introduce-napi_is_scheduled-helper.patch
deleted file mode 100644 (file)
index 319bc3e..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-From 7f3eb2174512fe6c9c0f062e96eccb0d3cc6d5cd Mon Sep 17 00:00:00 2001
-From: Christian Marangi <[email protected]>
-Date: Wed, 18 Oct 2023 14:35:47 +0200
-Subject: [PATCH] net: introduce napi_is_scheduled helper
-
-We currently have napi_if_scheduled_mark_missed, which can be used to
-check if napi is scheduled, but it does more than simply check this and
-return a bool. Some drivers already implement custom functions to
-check if napi is scheduled.
-
-Drop these custom functions and introduce napi_is_scheduled, which
-simply checks atomically whether napi is scheduled.
-
-Update any driver and code that implements a similar check to use
-this new helper instead.
-
-Signed-off-by: Christian Marangi <[email protected]>
-Signed-off-by: Paolo Abeni <[email protected]>
----
- drivers/net/ethernet/chelsio/cxgb3/sge.c  |  8 --------
- drivers/net/wireless/realtek/rtw89/core.c |  2 +-
- include/linux/netdevice.h                 | 23 +++++++++++++++++++++++
- net/core/dev.c                            |  2 +-
- 4 files changed, 25 insertions(+), 10 deletions(-)
-
---- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
-+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
-@@ -2501,14 +2501,6 @@ static int napi_rx_handler(struct napi_s
-       return work_done;
- }
--/*
-- * Returns true if the device is already scheduled for polling.
-- */
--static inline int napi_is_scheduled(struct napi_struct *napi)
--{
--      return test_bit(NAPI_STATE_SCHED, &napi->state);
--}
--
- /**
-  *    process_pure_responses - process pure responses from a response queue
-  *    @adap: the adapter
---- a/drivers/net/wireless/realtek/rtw89/core.c
-+++ b/drivers/net/wireless/realtek/rtw89/core.c
-@@ -1744,7 +1744,7 @@ static void rtw89_core_rx_to_mac80211(st
-       struct napi_struct *napi = &rtwdev->napi;
-       /* In low power mode, napi isn't scheduled. Receive it to netif. */
--      if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state)))
-+      if (unlikely(!napi_is_scheduled(napi)))
-               napi = NULL;
-       rtw89_core_hw_to_sband_rate(rx_status);
---- a/include/linux/netdevice.h
-+++ b/include/linux/netdevice.h
-@@ -480,6 +480,29 @@ static inline bool napi_prefer_busy_poll
-       return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
- }
-+/**
-+ * napi_is_scheduled - test if NAPI is scheduled
-+ * @n: NAPI context
-+ *
-+ * This check is "best-effort". With no locking implemented,
-+ * a NAPI can be scheduled or terminate right after this check
-+ * and produce imprecise results.
-+ *
-+ * NAPI_STATE_SCHED is an internal state; napi_is_scheduled
-+ * should not normally be used, and napi_schedule should be
-+ * used instead.
-+ *
-+ * Use only if the driver really needs to check if a NAPI
-+ * is scheduled, for example in the context of a delayed timer
-+ * that can be skipped if a NAPI is already scheduled.
-+ *
-+ * Return True if NAPI is scheduled, False otherwise.
-+ */
-+static inline bool napi_is_scheduled(struct napi_struct *n)
-+{
-+      return test_bit(NAPI_STATE_SCHED, &n->state);
-+}
-+
- bool napi_schedule_prep(struct napi_struct *n);
- /**
---- a/net/core/dev.c
-+++ b/net/core/dev.c
-@@ -6643,7 +6643,7 @@ static int __napi_poll(struct napi_struc
-        * accidentally calling ->poll() when NAPI is not scheduled.
-        */
-       work = 0;
--      if (test_bit(NAPI_STATE_SCHED, &n->state)) {
-+      if (napi_is_scheduled(n)) {
-               work = n->poll(n, weight);
-               trace_napi_poll(n, work, weight);
-       }
diff --git a/target/linux/generic/backport-6.6/770-v6.7-net-introduce-napi_is_scheduled-helper.patch b/target/linux/generic/backport-6.6/770-v6.7-net-introduce-napi_is_scheduled-helper.patch
new file mode 100644 (file)
index 0000000..319bc3e
--- /dev/null
@@ -0,0 +1,96 @@
+From 7f3eb2174512fe6c9c0f062e96eccb0d3cc6d5cd Mon Sep 17 00:00:00 2001
+From: Christian Marangi <[email protected]>
+Date: Wed, 18 Oct 2023 14:35:47 +0200
+Subject: [PATCH] net: introduce napi_is_scheduled helper
+
+We currently have napi_if_scheduled_mark_missed, which can be used to
+check if napi is scheduled, but it does more than simply check this and
+return a bool. Some drivers already implement custom functions to
+check if napi is scheduled.
+
+Drop these custom functions and introduce napi_is_scheduled, which
+simply checks atomically whether napi is scheduled.
+
+Update any driver and code that implements a similar check to use
+this new helper instead.
+
+Signed-off-by: Christian Marangi <[email protected]>
+Signed-off-by: Paolo Abeni <[email protected]>
+---
+ drivers/net/ethernet/chelsio/cxgb3/sge.c  |  8 --------
+ drivers/net/wireless/realtek/rtw89/core.c |  2 +-
+ include/linux/netdevice.h                 | 23 +++++++++++++++++++++++
+ net/core/dev.c                            |  2 +-
+ 4 files changed, 25 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
+@@ -2501,14 +2501,6 @@ static int napi_rx_handler(struct napi_s
+       return work_done;
+ }
+-/*
+- * Returns true if the device is already scheduled for polling.
+- */
+-static inline int napi_is_scheduled(struct napi_struct *napi)
+-{
+-      return test_bit(NAPI_STATE_SCHED, &napi->state);
+-}
+-
+ /**
+  *    process_pure_responses - process pure responses from a response queue
+  *    @adap: the adapter
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -1744,7 +1744,7 @@ static void rtw89_core_rx_to_mac80211(st
+       struct napi_struct *napi = &rtwdev->napi;
+       /* In low power mode, napi isn't scheduled. Receive it to netif. */
+-      if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state)))
++      if (unlikely(!napi_is_scheduled(napi)))
+               napi = NULL;
+       rtw89_core_hw_to_sband_rate(rx_status);
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -480,6 +480,29 @@ static inline bool napi_prefer_busy_poll
+       return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
+ }
++/**
++ * napi_is_scheduled - test if NAPI is scheduled
++ * @n: NAPI context
++ *
++ * This check is "best-effort". With no locking implemented,
++ * a NAPI can be scheduled or terminate right after this check
++ * and produce imprecise results.
++ *
++ * NAPI_STATE_SCHED is an internal state; napi_is_scheduled
++ * should not normally be used, and napi_schedule should be
++ * used instead.
++ *
++ * Use only if the driver really needs to check if a NAPI
++ * is scheduled, for example in the context of a delayed timer
++ * that can be skipped if a NAPI is already scheduled.
++ *
++ * Return True if NAPI is scheduled, False otherwise.
++ */
++static inline bool napi_is_scheduled(struct napi_struct *n)
++{
++      return test_bit(NAPI_STATE_SCHED, &n->state);
++}
++
+ bool napi_schedule_prep(struct napi_struct *n);
+ /**
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6643,7 +6643,7 @@ static int __napi_poll(struct napi_struc
+        * accidentally calling ->poll() when NAPI is not scheduled.
+        */
+       work = 0;
+-      if (test_bit(NAPI_STATE_SCHED, &n->state)) {
++      if (napi_is_scheduled(n)) {
+               work = n->poll(n, weight);
+               trace_napi_poll(n, work, weight);
+       }
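
The 770 patch is pure API consolidation, so a usage sketch closes the
loop: napi_is_scheduled() is nothing more than a lockless
test_bit(NAPI_STATE_SCHED, ...), and the call sites it replaces all
follow the same "skip the work if polling is already pending" pattern.
A userspace model of that pattern using C11 atomics (the struct and bit
index are stand-ins for the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NAPI_STATE_SCHED 0 /* bit index; stand-in for the kernel enum */

    struct napi_struct { atomic_ulong state; };

    /* Models the helper added to <linux/netdevice.h>: a lockless,
     * best-effort check that NAPI is currently scheduled. */
    static bool napi_is_scheduled(struct napi_struct *n)
    {
            return atomic_load(&n->state) & (1UL << NAPI_STATE_SCHED);
    }

    int main(void)
    {
            struct napi_struct napi = { .state = 0 };

            /* e.g. the rtw89 low-power path: no poll pending, so hand
             * the frame to the stack directly. */
            if (!napi_is_scheduled(&napi))
                    puts("not scheduled: receive to netif directly");

            atomic_fetch_or(&napi.state, 1UL << NAPI_STATE_SCHED);
            if (napi_is_scheduled(&napi))
                    puts("scheduled: let the poll loop consume it");
            return 0;
    }

As the new kernel-doc warns, the result is only a best-effort snapshot:
the NAPI state can change the instant after the bit is read, so the check
suits skippable work, not correctness-critical decisions.
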