--- /dev/null
+From 5d0fad48d2dec175ecb999974b94203c577973ef Mon Sep 17 00:00:00 2001
+Date: Wed, 8 May 2024 11:43:34 +0100
+Subject: [PATCH] net: ethernet: mediatek: split tx and rx fields in
+ mtk_soc_data struct
+
+Split the tx and rx fields in the mtk_soc_data struct. This is a
+preliminary patch for rolling back to ADMAv1 on the MT7986 and MT7981
+SoCs in order to fix a hw hang that occurs if the device receives a
+corrupted packet while using ADMAv2.0.
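+
+For illustration only (this snippet is not part of the diff below), the
+mtk_soc_data layout changes roughly as follows:
+
+	/* before: a single sub-struct shared by both directions */
+	struct {
+		u32 txd_size;
+		u32 rxd_size;
+		u32 rx_irq_done_mask;
+		u32 rx_dma_l4_valid;
+		u32 dma_max_len;
+		u32 dma_len_offset;
+	} txrx;
+
+	/* after: separate tx and rx sub-structs; dma_max_len and
+	 * dma_len_offset are carried in both so the two directions can
+	 * be configured independently by the follow-up patch
+	 */
+	struct {
+		u32 desc_size;
+		u32 dma_max_len;
+		u32 dma_len_offset;
+	} tx;
+	struct {
+		u32 desc_size;
+		u32 irq_done_mask;
+		u32 dma_l4_valid;
+		u32 dma_max_len;
+		u32 dma_len_offset;
+	} rx;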
+
+Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
+Link: https://lore.kernel.org/r/70a799b1f060ec2f57883e88ccb420ac0fb0abb5.1715164770.git.daniel@makrotopia.org
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 29 +--
+ 2 files changed, 139 insertions(+), 100 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -1078,7 +1078,7 @@ static int mtk_init_fq_dma(struct mtk_et
+ eth->scratch_ring = eth->sram_base;
+ else
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+- cnt * soc->txrx.txd_size,
++ cnt * soc->tx.desc_size,
+ &eth->phy_scratch_ring,
+ GFP_KERNEL);
+ if (unlikely(!eth->scratch_ring))
+@@ -1094,16 +1094,16 @@ static int mtk_init_fq_dma(struct mtk_et
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+
+- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
++ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+
+ for (i = 0; i < cnt; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+- txd = eth->scratch_ring + i * soc->txrx.txd_size;
++ txd = eth->scratch_ring + i * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (i < cnt - 1)
+ txd->txd2 = eth->phy_scratch_ring +
+- (i + 1) * soc->txrx.txd_size;
++ (i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+@@ -1350,7 +1350,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+@@ -1391,7 +1391,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+- soc->txrx.dma_max_len);
++ soc->tx.dma_max_len);
+ txd_info.qid = queue;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+@@ -1404,7 +1404,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->txrx.txd_size);
++ soc->tx.desc_size);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+@@ -1447,7 +1447,7 @@ static int mtk_tx_map(struct sk_buff *sk
+ } else {
+ int next_idx;
+
+- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
++ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+@@ -1456,7 +1456,7 @@ static int mtk_tx_map(struct sk_buff *sk
+
+ err_dma:
+ do {
+- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf, false);
+@@ -1481,7 +1481,7 @@ static int mtk_cal_txd_req(struct mtk_et
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+- eth->soc->txrx.dma_max_len);
++ eth->soc->tx.dma_max_len);
+ }
+ } else {
+ nfrags += skb_shinfo(skb)->nr_frags;
+@@ -1588,7 +1588,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
+
+ ring = &eth->rx_ring[i];
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+@@ -1756,7 +1756,7 @@ static int mtk_xdp_submit_frame(struct m
+ }
+ htxd = txd;
+
+- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+@@ -1776,7 +1776,7 @@ static int mtk_xdp_submit_frame(struct m
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+- soc->txrx.txd_size);
++ soc->tx.desc_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+@@ -1813,7 +1813,7 @@ static int mtk_xdp_submit_frame(struct m
+ } else {
+ int idx;
+
+- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
++ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+@@ -1825,7 +1825,7 @@ static int mtk_xdp_submit_frame(struct m
+ unmap:
+ while (htxd != txd) {
+ txd_pdma = qdma_to_pdma(ring, htxd);
+- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
++ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
+ mtk_tx_unmap(eth, tx_buf, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+@@ -1953,7 +1953,7 @@ static int mtk_poll_rx(struct napi_struc
+ goto rx_done;
+
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + idx * eth->soc->rx.desc_size;
+ data = ring->data[idx];
+
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
+@@ -2088,7 +2088,7 @@ static int mtk_poll_rx(struct napi_struc
+ rxdcsum = &trxd.rxd4;
+ }
+
+- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
++ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+@@ -2209,7 +2209,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
+ break;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+- eth->soc->txrx.txd_size);
++ eth->soc->tx.desc_size);
+ if (!tx_buf->data)
+ break;
+
+@@ -2257,7 +2257,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
+ }
+ mtk_tx_unmap(eth, tx_buf, true);
+
+- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
++ desc = ring->dma + cpu * eth->soc->tx.desc_size;
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+@@ -2346,7 +2346,7 @@ static int mtk_napi_rx(struct napi_struc
+ do {
+ int rx_done;
+
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
++ mtk_w32(eth, eth->soc->rx.irq_done_mask,
+ reg_map->pdma.irq_status);
+ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ rx_done_total += rx_done;
+@@ -2362,10 +2362,10 @@ static int mtk_napi_rx(struct napi_struc
+ return budget;
+
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+- eth->soc->txrx.rx_irq_done_mask);
++ eth->soc->rx.irq_done_mask);
+
+ if (napi_complete_done(napi, rx_done_total))
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+
+ return rx_done_total;
+ }
+@@ -2374,7 +2374,7 @@ static int mtk_tx_alloc(struct mtk_eth *
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+- int i, sz = soc->txrx.txd_size;
++ int i, sz = soc->tx.desc_size;
+ struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+@@ -2497,14 +2497,14 @@ static void mtk_tx_clean(struct mtk_eth
+ }
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->txrx.txd_size,
++ ring->dma_size * soc->tx.desc_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * soc->txrx.txd_size,
++ ring->dma_size * soc->tx.desc_size,
+ ring->dma_pdma, ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
+@@ -2559,15 +2559,15 @@ static int mtk_rx_alloc(struct mtk_eth *
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ rx_flag != MTK_RX_FLAGS_NORMAL) {
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+- rx_dma_size * eth->soc->txrx.rxd_size,
+- &ring->phys, GFP_KERNEL);
++ rx_dma_size * eth->soc->rx.desc_size,
++ &ring->phys, GFP_KERNEL);
+ } else {
+ struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+ ring->dma = tx_ring->dma + tx_ring_size *
+- eth->soc->txrx.txd_size * (ring_no + 1);
++ eth->soc->tx.desc_size * (ring_no + 1);
+ ring->phys = tx_ring->phys + tx_ring_size *
+- eth->soc->txrx.txd_size * (ring_no + 1);
++ eth->soc->tx.desc_size * (ring_no + 1);
+ }
+
+ if (!ring->dma)
+@@ -2578,7 +2578,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+ dma_addr_t dma_addr;
+ void *data;
+
+- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + i * eth->soc->rx.desc_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+@@ -2667,7 +2667,7 @@ static void mtk_rx_clean(struct mtk_eth
+ if (!ring->data[i])
+ continue;
+
+- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
++ rxd = ring->dma + i * eth->soc->rx.desc_size;
+ if (!rxd->rxd1)
+ continue;
+
+@@ -2684,7 +2684,7 @@ static void mtk_rx_clean(struct mtk_eth
+
+ if (!in_sram && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+- ring->dma_size * eth->soc->txrx.rxd_size,
++ ring->dma_size * eth->soc->rx.desc_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+@@ -3047,7 +3047,7 @@ static void mtk_dma_free(struct mtk_eth
+ netdev_reset_queue(eth->netdev[i]);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ dma_free_coherent(eth->dma_dev,
+- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
++ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+@@ -3098,7 +3098,7 @@ static irqreturn_t mtk_handle_irq_rx(int
+ eth->rx_events++;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ __napi_schedule(&eth->rx_napi);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ }
+
+ return IRQ_HANDLED;
+@@ -3123,9 +3123,9 @@ static irqreturn_t mtk_handle_irq(int ir
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+- eth->soc->txrx.rx_irq_done_mask) {
++ eth->soc->rx.irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+- eth->soc->txrx.rx_irq_done_mask)
++ eth->soc->rx.irq_done_mask)
+ mtk_handle_irq_rx(irq, _eth);
+ }
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+@@ -3143,10 +3143,10 @@ static void mtk_poll_controller(struct n
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
+ }
+ #endif
+
+@@ -3308,7 +3308,7 @@ static int mtk_open(struct net_device *d
+ napi_enable(&eth->tx_napi);
+ napi_enable(&eth->rx_napi);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
+ refcount_set(ð->dma_refcnt, 1);
+ }
+ else
+@@ -3391,7 +3391,7 @@ static int mtk_stop(struct net_device *d
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
++ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
+@@ -3867,9 +3867,9 @@ static int mtk_hw_init(struct mtk_eth *e
+
+ /* FE int grouping */
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
++ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
++ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+@@ -4944,11 +4944,15 @@ static const struct mtk_soc_data mt2701_
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -4964,11 +4968,15 @@ static const struct mtk_soc_data mt7621_
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -4986,11 +4994,15 @@ static const struct mtk_soc_data mt7622_
+ .hash_offset = 2,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5007,11 +5019,15 @@ static const struct mtk_soc_data mt7623_
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5026,11 +5042,15 @@ static const struct mtk_soc_data mt7629_
+ .required_pctl = false,
+ .has_accounting = true,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+@@ -5048,11 +5068,15 @@ static const struct mtk_soc_data mt7981_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5070,11 +5094,15 @@ static const struct mtk_soc_data mt7986_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5092,11 +5120,15 @@ static const struct mtk_soc_data mt7988_
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma_v2),
+- .rxd_size = sizeof(struct mtk_rx_dma_v2),
+- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma_v2),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
++ .dma_len_offset = 8,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma_v2),
++ .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+@@ -5109,11 +5141,15 @@ static const struct mtk_soc_data rt5350_
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 1,
+- .txrx = {
+- .txd_size = sizeof(struct mtk_tx_dma),
+- .rxd_size = sizeof(struct mtk_rx_dma),
+- .rx_irq_done_mask = MTK_RX_DONE_INT,
+- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
++ .tx = {
++ .desc_size = sizeof(struct mtk_tx_dma),
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
++ },
++ .rx = {
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
++ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -327,8 +327,8 @@
+ /* QDMA descriptor txd3 */
+ #define TX_DMA_OWNER_CPU BIT(31)
+ #define TX_DMA_LS0 BIT(30)
+-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
++#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
++#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
+ #define TX_DMA_SWC BIT(14)
+ #define TX_DMA_PQID GENMASK(3, 0)
+ #define TX_DMA_ADDR64_MASK GENMASK(3, 0)
+@@ -348,8 +348,8 @@
+ /* QDMA descriptor rxd2 */
+ #define RX_DMA_DONE BIT(31)
+ #define RX_DMA_LSO BIT(30)
+-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
++#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
++#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
+ #define RX_DMA_VTAG BIT(15)
+ #define RX_DMA_ADDR64_MASK GENMASK(3, 0)
+ #if IS_ENABLED(CONFIG_64BIT)
+@@ -1150,10 +1150,9 @@ struct mtk_reg_map {
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+- * @txd_size Tx DMA descriptor size.
+- * @rxd_size Rx DMA descriptor size.
+- * @rx_irq_done_mask Rx irq done register mask.
+- * @rx_dma_l4_valid Rx DMA valid register mask.
++ * @desc_size Tx/Rx DMA descriptor size.
++ * @irq_done_mask Rx irq done register mask.
++ * @dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
+ */
+@@ -1171,13 +1170,17 @@ struct mtk_soc_data {
+ bool has_accounting;
+ bool disable_pll_modes;
+ struct {
+- u32 txd_size;
+- u32 rxd_size;
+- u32 rx_irq_done_mask;
+- u32 rx_dma_l4_valid;
++ u32 desc_size;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+- } txrx;
++ } tx;
++ struct {
++ u32 desc_size;
++ u32 irq_done_mask;
++ u32 dma_l4_valid;
++ u32 dma_max_len;
++ u32 dma_len_offset;
++ } rx;
+ };
+
+ #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
--- /dev/null
+From 4d572e867bdb372bb4add39a0fa495c6a9c9a8da Mon Sep 17 00:00:00 2001
+Date: Wed, 8 May 2024 11:43:56 +0100
+Subject: [PATCH] net: ethernet: mediatek: use ADMAv1 instead of ADMAv2.0 on
+ MT7981 and MT7986
+
+ADMAv2.0 is plagued by RX hangs which can't easily be detected and which
+happen upon receipt of a corrupted Ethernet frame.
+
+Use ADMAv1 instead, which is also still present and usable, and doesn't
+suffer from that problem.
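+
+As an illustration (condensed from the hunks below, no extra changes),
+the RX side of mtk_soc_data for MT7986 goes from the ADMAv2.0 settings
+
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma_v2),
+		.irq_done_mask = MTK_RX_DONE_INT_V2,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+		.dma_len_offset = 8,
+	},
+
+back to the ADMAv1 settings
+
+	.rx = {
+		.desc_size = sizeof(struct mtk_rx_dma),
+		.irq_done_mask = MTK_RX_DONE_INT,
+		.dma_l4_valid = RX_DMA_L4_VALID_V2,
+		.dma_max_len = MTK_TX_DMA_BUF_LEN,
+		.dma_len_offset = 16,
+	},
+
+while dma_l4_valid stays at the V2 value and the PDMA register base
+moves from the 0x6xxx range back to the 0x4xxx range.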
+
+Fixes: 197c9e9b17b1 ("net: ethernet: mtk_eth_soc: introduce support for mt7986 chipset")
+Link: https://lore.kernel.org/r/57cef74bbd0c243366ad1ff4221e3f72f437ec80.1715164770.git.daniel@makrotopia.org
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
+ 1 file changed, 23 insertions(+), 23 deletions(-)
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -108,16 +108,16 @@ static const struct mtk_reg_map mt7986_r
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+- .rx_ptr = 0x6100,
+- .rx_cnt_cfg = 0x6104,
+- .pcrx_ptr = 0x6108,
+- .glo_cfg = 0x6204,
+- .rst_idx = 0x6208,
+- .delay_irq = 0x620c,
+- .irq_status = 0x6220,
+- .irq_mask = 0x6228,
+- .adma_rx_dbg0 = 0x6238,
+- .int_grp = 0x6250,
++ .rx_ptr = 0x4100,
++ .rx_cnt_cfg = 0x4104,
++ .pcrx_ptr = 0x4108,
++ .glo_cfg = 0x4204,
++ .rst_idx = 0x4208,
++ .delay_irq = 0x420c,
++ .irq_status = 0x4220,
++ .irq_mask = 0x4228,
++ .adma_rx_dbg0 = 0x4238,
++ .int_grp = 0x4250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+@@ -1046,7 +1046,7 @@ static bool mtk_rx_get_desc(struct mtk_e
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
+@@ -1960,7 +1960,7 @@ static int mtk_poll_rx(struct napi_struc
+ break;
+
+ /* find out which mac the packet come from. values start at 1 */
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
+ switch (val) {
+@@ -2072,7 +2072,7 @@ static int mtk_poll_rx(struct napi_struc
+ skb->dev = netdev;
+ bytes += skb->len;
+
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+@@ -2613,7 +2613,7 @@ static int mtk_rx_alloc(struct mtk_eth *
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+@@ -3813,7 +3813,7 @@ static int mtk_hw_init(struct mtk_eth *e
+ else
+ mtk_hw_reset(eth);
+
+- if (mtk_is_netsys_v2_or_greater(eth)) {
++ if (mtk_is_netsys_v3_or_greater(eth)) {
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+@@ -5074,11 +5074,11 @@ static const struct mtk_soc_data mt7981_
+ .dma_len_offset = 8,
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
+- .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+- .dma_len_offset = 8,
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
+ },
+ };
+
+@@ -5100,11 +5100,11 @@ static const struct mtk_soc_data mt7986_
+ .dma_len_offset = 8,
+ },
+ .rx = {
+- .desc_size = sizeof(struct mtk_rx_dma_v2),
+- .irq_done_mask = MTK_RX_DONE_INT_V2,
++ .desc_size = sizeof(struct mtk_rx_dma),
++ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+- .dma_len_offset = 8,
++ .dma_max_len = MTK_TX_DMA_BUF_LEN,
++ .dma_len_offset = 16,
+ },
+ };
+
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1432,6 +1432,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
+@@ -1435,6 +1435,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void *type_data);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 eth->rx_events++;
 if (likely(napi_schedule_prep(&eth->rx_napi))) {
- __napi_schedule(&eth->rx_napi);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
 }
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1323,6 +1323,22 @@ struct mtk_mac {
+@@ -1326,6 +1326,22 @@ struct mtk_mac {
 /* the struct describing the SoC. these are declared in the soc_xyz.c files */
 extern const struct of_device_id of_mtk_match[];
 static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
 {
 return eth->soc->version == 1;
-@@ -1337,6 +1353,7 @@ static inline bool mtk_is_netsys_v3_or_g
+@@ -1340,6 +1356,7 @@ static inline bool mtk_is_netsys_v3_or_g
 {
 return eth->soc->version > 2;
 }
 struct mtk_tx_dma_desc_info {
 dma_addr_t addr;
-@@ -1184,6 +1311,24 @@ struct mtk_soc_data {
+@@ -1187,6 +1314,24 @@ struct mtk_soc_data {
 /* currently no SoC has more than 3 macs */
 #define MTK_MAX_DEVS 3
 /* struct mtk_eth - This is the main datasructure for holding the state
 * of the driver
 * @dev: The device pointer
-@@ -1204,6 +1349,12 @@ struct mtk_soc_data {
+@@ -1207,6 +1352,12 @@ struct mtk_soc_data {
 * @infra: The register map pointing at the range used to setup
 * SGMII and GePHY path
 * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
 * @pctl: The register map pointing at the range used to setup
 * GMAC port drive/slew values
 * @dma_refcnt: track how many netdevs are using the DMA engine
-@@ -1247,6 +1398,10 @@ struct mtk_eth {
+@@ -1250,6 +1401,10 @@ struct mtk_eth {
 struct regmap *ethsys;
 struct regmap *infra;
 struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
 struct regmap *pctl;
 bool hwlro;
 refcount_t dma_refcnt;
-@@ -1434,6 +1589,19 @@ static inline u32 mtk_get_ib2_multicast_
+@@ -1437,6 +1592,19 @@ static inline u32 mtk_get_ib2_multicast_
 return MTK_FOE_IB2_MULTICAST;
 }
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
-@@ -1442,8 +1610,10 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne
+@@ -1445,8 +1613,10 @@ u32 mtk_r32(struct mtk_eth *eth, unsigne
 u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
 int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
-@@ -1453,5 +1623,63 @@ int mtk_flow_offload_cmd(struct mtk_eth
+@@ -1456,5 +1626,63 @@ int mtk_flow_offload_cmd(struct mtk_eth
 void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
+++ /dev/null
-Date: Thu, 2 Nov 2023 16:47:07 +0100
-Subject: [PATCH net-next 1/2] net: ethernet: mediatek: split tx and rx fields
- in mtk_soc_data struct
-
-Split tx and rx fields in mtk_soc_data struct. This is a preliminary
-patch to roll back to QDMA for MT7986 SoC in order to fix a hw hang
-if the device receives a corrupted packet.
-
----
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 210 ++++++++++++--------
- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 29 +--
- 2 files changed, 139 insertions(+), 100 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1239,7 +1239,7 @@ static int mtk_init_fq_dma(struct mtk_et
- eth->scratch_ring = eth->sram_base;
- else
- eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
-- cnt * soc->txrx.txd_size,
-+ cnt * soc->tx.desc_size,
- &eth->phy_scratch_ring,
- GFP_KERNEL);
- if (unlikely(!eth->scratch_ring))
-@@ -1255,16 +1255,16 @@ static int mtk_init_fq_dma(struct mtk_et
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
-
-- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
-+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
-
- for (i = 0; i < cnt; i++) {
- struct mtk_tx_dma_v2 *txd;
-
-- txd = eth->scratch_ring + i * soc->txrx.txd_size;
-+ txd = eth->scratch_ring + i * soc->tx.desc_size;
- txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- if (i < cnt - 1)
- txd->txd2 = eth->phy_scratch_ring +
-- (i + 1) * soc->txrx.txd_size;
-+ (i + 1) * soc->tx.desc_size;
-
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- txd->txd4 = 0;
-@@ -1511,7 +1511,7 @@ static int mtk_tx_map(struct sk_buff *sk
- if (itxd == ring->last_free)
- return -ENOMEM;
-
-- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
-+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
- memset(itx_buf, 0, sizeof(*itx_buf));
-
- txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
-@@ -1552,7 +1552,7 @@ static int mtk_tx_map(struct sk_buff *sk
-
- memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
- txd_info.size = min_t(unsigned int, frag_size,
-- soc->txrx.dma_max_len);
-+ soc->tx.dma_max_len);
- txd_info.qid = queue;
- txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
- !(frag_size - txd_info.size);
-@@ -1565,7 +1565,7 @@ static int mtk_tx_map(struct sk_buff *sk
- mtk_tx_set_dma_desc(dev, txd, &txd_info);
-
- tx_buf = mtk_desc_to_tx_buf(ring, txd,
-- soc->txrx.txd_size);
-+ soc->tx.desc_size);
- if (new_desc)
- memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
-@@ -1608,7 +1608,7 @@ static int mtk_tx_map(struct sk_buff *sk
- } else {
- int next_idx;
-
-- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
-+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
- ring->dma_size);
- mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
- }
-@@ -1617,7 +1617,7 @@ static int mtk_tx_map(struct sk_buff *sk
-
- err_dma:
- do {
-- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
-+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
-
- /* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
-@@ -1642,7 +1642,7 @@ static int mtk_cal_txd_req(struct mtk_et
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- nfrags += DIV_ROUND_UP(skb_frag_size(frag),
-- eth->soc->txrx.dma_max_len);
-+ eth->soc->tx.dma_max_len);
- }
- } else {
- nfrags += skb_shinfo(skb)->nr_frags;
-@@ -1783,7 +1783,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
-
- ring = &eth->rx_ring[i];
- idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
-- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
-+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
- if (rxd->rxd2 & RX_DMA_DONE) {
- ring->calc_idx_update = true;
- return ring;
-@@ -1951,7 +1951,7 @@ static int mtk_xdp_submit_frame(struct m
- }
- htxd = txd;
-
-- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
-+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
- memset(tx_buf, 0, sizeof(*tx_buf));
- htx_buf = tx_buf;
-
-@@ -1971,7 +1971,7 @@ static int mtk_xdp_submit_frame(struct m
- goto unmap;
-
- tx_buf = mtk_desc_to_tx_buf(ring, txd,
-- soc->txrx.txd_size);
-+ soc->tx.desc_size);
- memset(tx_buf, 0, sizeof(*tx_buf));
- n_desc++;
- }
-@@ -2008,7 +2008,7 @@ static int mtk_xdp_submit_frame(struct m
- } else {
- int idx;
-
-- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
-+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
- mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
- MT7628_TX_CTX_IDX0);
- }
-@@ -2020,7 +2020,7 @@ static int mtk_xdp_submit_frame(struct m
- unmap:
- while (htxd != txd) {
- txd_pdma = qdma_to_pdma(ring, htxd);
-- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
-+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
- mtk_tx_unmap(eth, tx_buf, false);
-
- htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
-@@ -2148,7 +2148,7 @@ static int mtk_poll_rx(struct napi_struc
- goto rx_done;
-
- idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
-- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
-+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
- data = ring->data[idx];
-
- if (!mtk_rx_get_desc(eth, &trxd, rxd))
-@@ -2283,7 +2283,7 @@ static int mtk_poll_rx(struct napi_struc
- rxdcsum = &trxd.rxd4;
- }
-
-- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
-+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
-@@ -2404,7 +2404,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
- break;
-
- tx_buf = mtk_desc_to_tx_buf(ring, desc,
-- eth->soc->txrx.txd_size);
-+ eth->soc->tx.desc_size);
- if (!tx_buf->data)
- break;
-
-@@ -2452,7 +2452,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
- }
- mtk_tx_unmap(eth, tx_buf, true);
-
-- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
-+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
- ring->last_free = desc;
- atomic_inc(&ring->free_count);
-
-@@ -2541,7 +2541,7 @@ static int mtk_napi_rx(struct napi_struc
- do {
- int rx_done;
-
-- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
-+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
- reg_map->pdma.irq_status);
- rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
- rx_done_total += rx_done;
-@@ -2557,10 +2557,10 @@ static int mtk_napi_rx(struct napi_struc
- return budget;
-
- } while (mtk_r32(eth, reg_map->pdma.irq_status) &
-- eth->soc->txrx.rx_irq_done_mask);
-+ eth->soc->rx.irq_done_mask);
-
- if (napi_complete_done(napi, rx_done_total))
-- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
-+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
-
- return rx_done_total;
- }
-@@ -2569,7 +2569,7 @@ static int mtk_tx_alloc(struct mtk_eth *
- {
- const struct mtk_soc_data *soc = eth->soc;
- struct mtk_tx_ring *ring = &eth->tx_ring;
-- int i, sz = soc->txrx.txd_size;
-+ int i, sz = soc->tx.desc_size;
- struct mtk_tx_dma_v2 *txd;
- int ring_size;
- u32 ofs, val;
-@@ -2692,14 +2692,14 @@ static void mtk_tx_clean(struct mtk_eth
- }
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
- dma_free_coherent(eth->dma_dev,
-- ring->dma_size * soc->txrx.txd_size,
-+ ring->dma_size * soc->tx.desc_size,
- ring->dma, ring->phys);
- ring->dma = NULL;
- }
-
- if (ring->dma_pdma) {
- dma_free_coherent(eth->dma_dev,
-- ring->dma_size * soc->txrx.txd_size,
-+ ring->dma_size * soc->tx.desc_size,
- ring->dma_pdma, ring->phys_pdma);
- ring->dma_pdma = NULL;
- }
-@@ -2754,15 +2754,15 @@ static int mtk_rx_alloc(struct mtk_eth *
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
- rx_flag != MTK_RX_FLAGS_NORMAL) {
- ring->dma = dma_alloc_coherent(eth->dma_dev,
-- rx_dma_size * eth->soc->txrx.rxd_size,
-- &ring->phys, GFP_KERNEL);
-+ rx_dma_size * eth->soc->rx.desc_size,
-+ &ring->phys, GFP_KERNEL);
- } else {
- struct mtk_tx_ring *tx_ring = &eth->tx_ring;
-
- ring->dma = tx_ring->dma + tx_ring_size *
-- eth->soc->txrx.txd_size * (ring_no + 1);
-+ eth->soc->tx.desc_size * (ring_no + 1);
- ring->phys = tx_ring->phys + tx_ring_size *
-- eth->soc->txrx.txd_size * (ring_no + 1);
-+ eth->soc->tx.desc_size * (ring_no + 1);
- }
-
- if (!ring->dma)
-@@ -2773,7 +2773,7 @@ static int mtk_rx_alloc(struct mtk_eth *
- dma_addr_t dma_addr;
- void *data;
-
-- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
-+ rxd = ring->dma + i * eth->soc->rx.desc_size;
- if (ring->page_pool) {
- data = mtk_page_pool_get_buff(ring->page_pool,
- &dma_addr, GFP_KERNEL);
-@@ -2862,7 +2862,7 @@ static void mtk_rx_clean(struct mtk_eth
- if (!ring->data[i])
- continue;
-
-- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
-+ rxd = ring->dma + i * eth->soc->rx.desc_size;
- if (!rxd->rxd1)
- continue;
-
-@@ -2879,7 +2879,7 @@ static void mtk_rx_clean(struct mtk_eth
-
- if (!in_sram && ring->dma) {
- dma_free_coherent(eth->dma_dev,
-- ring->dma_size * eth->soc->txrx.rxd_size,
-+ ring->dma_size * eth->soc->rx.desc_size,
- ring->dma, ring->phys);
- ring->dma = NULL;
- }
-@@ -3242,7 +3242,7 @@ static void mtk_dma_free(struct mtk_eth
- netdev_reset_queue(eth->netdev[i]);
- if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
- dma_free_coherent(eth->dma_dev,
-- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
-+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
- eth->scratch_ring, eth->phy_scratch_ring);
- eth->scratch_ring = NULL;
- eth->phy_scratch_ring = 0;
-@@ -3292,7 +3292,7 @@ static irqreturn_t mtk_handle_irq_rx(int
-
- eth->rx_events++;
- if (likely(napi_schedule_prep(&eth->rx_napi))) {
-- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
-+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
- __napi_schedule(&eth->rx_napi);
- }
-
-@@ -3318,9 +3318,9 @@ static irqreturn_t mtk_handle_irq(int ir
- const struct mtk_reg_map *reg_map = eth->soc->reg_map;
-
- if (mtk_r32(eth, reg_map->pdma.irq_mask) &
-- eth->soc->txrx.rx_irq_done_mask) {
-+ eth->soc->rx.irq_done_mask) {
- if (mtk_r32(eth, reg_map->pdma.irq_status) &
-- eth->soc->txrx.rx_irq_done_mask)
-+ eth->soc->rx.irq_done_mask)
- mtk_handle_irq_rx(irq, _eth);
- }
- if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
-@@ -3338,10 +3338,10 @@ static void mtk_poll_controller(struct n
- struct mtk_eth *eth = mac->hw;
-
- mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
-+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
- mtk_handle_irq_rx(eth->irq[2], dev);
- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
-+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
- }
- #endif
-
-@@ -3506,7 +3506,7 @@ static int mtk_open(struct net_device *d
- napi_enable(&eth->tx_napi);
- napi_enable(&eth->rx_napi);
- mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
-+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
- refcount_set(ð->dma_refcnt, 1);
- }
- else
-@@ -3589,7 +3589,7 @@ static int mtk_stop(struct net_device *d
- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
-
- mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
-+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
- napi_disable(&eth->tx_napi);
- napi_disable(&eth->rx_napi);
-
-@@ -4065,9 +4065,9 @@ static int mtk_hw_init(struct mtk_eth *e
-
- /* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
-- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
-+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
- mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
-- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
-+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
- mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
-
- if (mtk_is_netsys_v3_or_greater(eth)) {
-@@ -5167,11 +5167,15 @@ static const struct mtk_soc_data mt2701_
- .required_clks = MT7623_CLKS_BITMAP,
- .required_pctl = true,
- .version = 1,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma),
-- .rxd_size = sizeof(struct mtk_rx_dma),
-- .rx_irq_done_mask = MTK_RX_DONE_INT,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
-+ .dma_l4_valid = RX_DMA_L4_VALID,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5187,11 +5191,15 @@ static const struct mtk_soc_data mt7621_
- .offload_version = 1,
- .hash_offset = 2,
- .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma),
-- .rxd_size = sizeof(struct mtk_rx_dma),
-- .rx_irq_done_mask = MTK_RX_DONE_INT,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
-+ .dma_l4_valid = RX_DMA_L4_VALID,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5209,11 +5217,15 @@ static const struct mtk_soc_data mt7622_
- .hash_offset = 2,
- .has_accounting = true,
- .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma),
-- .rxd_size = sizeof(struct mtk_rx_dma),
-- .rx_irq_done_mask = MTK_RX_DONE_INT,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
-+ .dma_l4_valid = RX_DMA_L4_VALID,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5230,11 +5242,15 @@ static const struct mtk_soc_data mt7623_
- .hash_offset = 2,
- .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .disable_pll_modes = true,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma),
-- .rxd_size = sizeof(struct mtk_rx_dma),
-- .rx_irq_done_mask = MTK_RX_DONE_INT,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
-+ .dma_l4_valid = RX_DMA_L4_VALID,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5249,11 +5265,15 @@ static const struct mtk_soc_data mt7629_
- .required_pctl = false,
- .has_accounting = true,
- .version = 1,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma),
-- .rxd_size = sizeof(struct mtk_rx_dma),
-- .rx_irq_done_mask = MTK_RX_DONE_INT,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
-+ .dma_l4_valid = RX_DMA_L4_VALID,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
-@@ -5271,11 +5291,15 @@ static const struct mtk_soc_data mt7981_
- .hash_offset = 4,
- .has_accounting = true,
- .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma_v2),
-- .rxd_size = sizeof(struct mtk_rx_dma_v2),
-- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma_v2),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-+ .dma_len_offset = 8,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma_v2),
-+ .irq_done_mask = MTK_RX_DONE_INT_V2,
-+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
- },
-@@ -5293,11 +5317,15 @@ static const struct mtk_soc_data mt7986_
- .hash_offset = 4,
- .has_accounting = true,
- .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma_v2),
-- .rxd_size = sizeof(struct mtk_rx_dma_v2),
-- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma_v2),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-+ .dma_len_offset = 8,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma_v2),
-+ .irq_done_mask = MTK_RX_DONE_INT_V2,
-+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
- },
-@@ -5315,11 +5343,15 @@ static const struct mtk_soc_data mt7988_
- .hash_offset = 4,
- .has_accounting = true,
- .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma_v2),
-- .rxd_size = sizeof(struct mtk_rx_dma_v2),
-- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma_v2),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-+ .dma_len_offset = 8,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma_v2),
-+ .irq_done_mask = MTK_RX_DONE_INT_V2,
-+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
- .dma_len_offset = 8,
- },
-@@ -5332,11 +5364,15 @@ static const struct mtk_soc_data rt5350_
- .required_clks = MT7628_CLKS_BITMAP,
- .required_pctl = false,
- .version = 1,
-- .txrx = {
-- .txd_size = sizeof(struct mtk_tx_dma),
-- .rxd_size = sizeof(struct mtk_rx_dma),
-- .rx_irq_done_mask = MTK_RX_DONE_INT,
-- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
-+ .tx = {
-+ .desc_size = sizeof(struct mtk_tx_dma),
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
-+ },
-+ .rx = {
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
-+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
- .dma_max_len = MTK_TX_DMA_BUF_LEN,
- .dma_len_offset = 16,
- },
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -326,8 +326,8 @@
- /* QDMA descriptor txd3 */
- #define TX_DMA_OWNER_CPU BIT(31)
- #define TX_DMA_LS0 BIT(30)
--#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
--#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
-+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
-+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
- #define TX_DMA_SWC BIT(14)
- #define TX_DMA_PQID GENMASK(3, 0)
- #define TX_DMA_ADDR64_MASK GENMASK(3, 0)
-@@ -347,8 +347,8 @@
- /* QDMA descriptor rxd2 */
- #define RX_DMA_DONE BIT(31)
- #define RX_DMA_LSO BIT(30)
--#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
--#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
-+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
-+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
- #define RX_DMA_VTAG BIT(15)
- #define RX_DMA_ADDR64_MASK GENMASK(3, 0)
- #if IS_ENABLED(CONFIG_64BIT)
-@@ -1279,10 +1279,9 @@ struct mtk_reg_map {
- * @foe_entry_size Foe table entry size.
- * @has_accounting Bool indicating support for accounting of
- * offloaded flows.
-- * @txd_size Tx DMA descriptor size.
-- * @rxd_size Rx DMA descriptor size.
-- * @rx_irq_done_mask Rx irq done register mask.
-- * @rx_dma_l4_valid Rx DMA valid register mask.
-+ * @desc_size Tx/Rx DMA descriptor size.
-+ * @irq_done_mask Rx irq done register mask.
-+ * @dma_l4_valid Rx DMA valid register mask.
- * @dma_max_len Max DMA tx/rx buffer length.
- * @dma_len_offset Tx/Rx DMA length field offset.
- */
-@@ -1300,13 +1299,17 @@ struct mtk_soc_data {
- bool has_accounting;
- bool disable_pll_modes;
- struct {
-- u32 txd_size;
-- u32 rxd_size;
-- u32 rx_irq_done_mask;
-- u32 rx_dma_l4_valid;
-+ u32 desc_size;
- u32 dma_max_len;
- u32 dma_len_offset;
-- } txrx;
-+ } tx;
-+ struct {
-+ u32 desc_size;
-+ u32 irq_done_mask;
-+ u32 dma_l4_valid;
-+ u32 dma_max_len;
-+ u32 dma_len_offset;
-+ } rx;
- };
-
- #define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
+++ /dev/null
-Date: Tue, 10 Oct 2023 21:06:43 +0200
-Subject: [PATCH net-next 2/2] net: ethernet: mediatek: use QDMA instead of
- ADMAv2 on MT7981 and MT7986
-
-ADMA is plagued by RX hangs which can't easily detected and happen upon
-receival of a corrupted package.
-Use QDMA just like on netsys v1 which is also still present and usable, and
-doesn't suffer from that problem.
-
----
- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 46 ++++++++++-----------
- 1 file changed, 23 insertions(+), 23 deletions(-)
-
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -108,16 +108,16 @@ static const struct mtk_reg_map mt7986_r
- .tx_irq_mask = 0x461c,
- .tx_irq_status = 0x4618,
- .pdma = {
-- .rx_ptr = 0x6100,
-- .rx_cnt_cfg = 0x6104,
-- .pcrx_ptr = 0x6108,
-- .glo_cfg = 0x6204,
-- .rst_idx = 0x6208,
-- .delay_irq = 0x620c,
-- .irq_status = 0x6220,
-- .irq_mask = 0x6228,
-- .adma_rx_dbg0 = 0x6238,
-- .int_grp = 0x6250,
-+ .rx_ptr = 0x4100,
-+ .rx_cnt_cfg = 0x4104,
-+ .pcrx_ptr = 0x4108,
-+ .glo_cfg = 0x4204,
-+ .rst_idx = 0x4208,
-+ .delay_irq = 0x420c,
-+ .irq_status = 0x4220,
-+ .irq_mask = 0x4228,
-+ .adma_rx_dbg0 = 0x4238,
-+ .int_grp = 0x4250,
- },
- .qdma = {
- .qtx_cfg = 0x4400,
-@@ -1207,7 +1207,7 @@ static bool mtk_rx_get_desc(struct mtk_e
- rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
- rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
- rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-- if (mtk_is_netsys_v2_or_greater(eth)) {
-+ if (mtk_is_netsys_v3_or_greater(eth)) {
- rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
- rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
- }
-@@ -2155,7 +2155,7 @@ static int mtk_poll_rx(struct napi_struc
- break;
-
- /* find out which mac the packet come from. values start at 1 */
-- if (mtk_is_netsys_v2_or_greater(eth)) {
-+ if (mtk_is_netsys_v3_or_greater(eth)) {
- u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
-
- switch (val) {
-@@ -2267,7 +2267,7 @@ static int mtk_poll_rx(struct napi_struc
- skb->dev = netdev;
- bytes += skb->len;
-
-- if (mtk_is_netsys_v2_or_greater(eth)) {
-+ if (mtk_is_netsys_v3_or_greater(eth)) {
- reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
- hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
- if (hash != MTK_RXD5_FOE_ENTRY)
-@@ -2808,7 +2808,7 @@ static int mtk_rx_alloc(struct mtk_eth *
-
- rxd->rxd3 = 0;
- rxd->rxd4 = 0;
-- if (mtk_is_netsys_v2_or_greater(eth)) {
-+ if (mtk_is_netsys_v3_or_greater(eth)) {
- rxd->rxd5 = 0;
- rxd->rxd6 = 0;
- rxd->rxd7 = 0;
-@@ -4011,7 +4011,7 @@ static int mtk_hw_init(struct mtk_eth *e
- else
- mtk_hw_reset(eth);
-
-- if (mtk_is_netsys_v2_or_greater(eth)) {
-+ if (mtk_is_netsys_v3_or_greater(eth)) {
- /* Set FE to PDMAv2 if necessary */
- val = mtk_r32(eth, MTK_FE_GLO_MISC);
- mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
-@@ -5297,11 +5297,11 @@ static const struct mtk_soc_data mt7981_
- .dma_len_offset = 8,
- },
- .rx = {
-- .desc_size = sizeof(struct mtk_rx_dma_v2),
-- .irq_done_mask = MTK_RX_DONE_INT_V2,
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID_V2,
-- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-- .dma_len_offset = 8,
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
- },
- };
-
-@@ -5323,11 +5323,11 @@ static const struct mtk_soc_data mt7986_
- .dma_len_offset = 8,
- },
- .rx = {
-- .desc_size = sizeof(struct mtk_rx_dma_v2),
-- .irq_done_mask = MTK_RX_DONE_INT_V2,
-+ .desc_size = sizeof(struct mtk_rx_dma),
-+ .irq_done_mask = MTK_RX_DONE_INT,
- .dma_l4_valid = RX_DMA_L4_VALID_V2,
-- .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
-- .dma_len_offset = 8,
-+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
-+ .dma_len_offset = 16,
- },
- };
-