.glo_cfg = 0x4604,
.rst_idx = 0x4608,
.delay_irq = 0x460c,
-@@ -4050,6 +4053,56 @@ static void mtk_set_mcr_max_rx(struct mt
+@@ -4053,6 +4056,56 @@ static void mtk_set_mcr_max_rx(struct mt
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
static void mtk_hw_reset(struct mtk_eth *eth)
{
u32 val;
-@@ -4529,6 +4582,8 @@ static void mtk_pending_work(struct work
+@@ -4532,6 +4585,8 @@ static void mtk_pending_work(struct work
rtnl_lock();
set_bit(MTK_RESETTING, &eth->state);
+ .vlan_tci = skb_vlan_tag_get(skb),
+ .first = true,
+ };
-+
+
+- mtk_tx_set_dma_desc(dev, itxd, &txd_info);
+ offset = 0;
+ frag_size = skb_headlen(cur_skb);
+ if (cur_skb != skb) {
+ struct ipv6hdr *iph = ipv6_hdr(cur_skb);
+ struct ipv6hdr *iph2 = ipv6_hdr(skb);
-- mtk_tx_set_dma_desc(dev, itxd, &txd_info);
-+ mtk_tx_update_ip6addr(skb, iph, th, &iph->saddr,
-+ &iph2->saddr);
-+ mtk_tx_update_ip6addr(skb, iph, th, &iph->daddr,
-+ &iph2->daddr);
-+ }
-
- itx_buf->mac_id = mac->id;
- setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
- k++);
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- unsigned int offset = 0;
- int frag_size = skb_frag_size(frag);
-+ mtk_tx_update_port(skb, th, &th->source, th2->source);
-+ mtk_tx_update_port(skb, th, &th->dest, th2->dest);
++ mtk_tx_update_ip6addr(skb, iph, th, &iph->saddr,
++ &iph2->saddr);
++ mtk_tx_update_ip6addr(skb, iph, th, &iph->daddr,
++ &iph2->daddr);
++ }
- while (frag_size) {
- bool new_desc = true;
++ mtk_tx_update_port(skb, th, &th->source, th2->source);
++ mtk_tx_update_port(skb, th, &th->dest, th2->dest);
+
+- if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
+- (i & 0x1)) {
+- txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+- txd_pdma = qdma_to_pdma(ring, txd);
+- if (txd == ring->last_free)
+- goto err_dma;
+ offset = -header_len;
+ frag_size += header_len;
+ } else if (next_skb) {
+ struct iphdr *iph = ip_hdr(cur_skb);
+ __be16 ip_len_val = cpu_to_be16(ip_len);
-- if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
-- (i & 0x1)) {
-- txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
-- txd_pdma = qdma_to_pdma(ring, txd);
-- if (txd == ring->last_free)
-- goto err_dma;
+- n_desc++;
+- } else {
+- new_desc = false;
+- }
+ csum_replace2(&iph->check, iph->tot_len, ip_len_val);
+ iph->tot_len = ip_len_val;
+ } else {
+ struct ipv6hdr *iph = ipv6_hdr(cur_skb);
+ __be16 ip_len_val = cpu_to_be16(ip_len - sizeof(*iph));
-
-- n_desc++;
-- } else {
-- new_desc = false;
-- }
++
+ iph->payload_len = ip_len_val;
+ }
+ }
+ } else {
+ state.txd_pdma->txd2 |= TX_DMA_LS1;
+ }
- }
-
++ }
++
+ if (next_skb) {
+ cur_skb = next_skb;
+ next_skb = cur_skb->next;
+ goto next;
-+ }
-+
+ }
+
+ /* store skb to cleanup */
+ state.tx_buf->type = MTK_TYPE_SKB;
+ state.tx_buf->data = skb;
return nfrags;
}
-@@ -1609,9 +1742,26 @@ static bool mtk_skb_has_small_frag(struc
+@@ -1609,9 +1742,29 @@ static bool mtk_skb_has_small_frag(struc
if (skb_frag_size(&skb_shinfo(skb)->frags[i]) < min_size)
return true;
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+ return true;
+
++ if (skb_tnl_header_len(skb))
++ return false;
++
+ return skb_pagelen(skb) - header_len == skb_shinfo(skb)->gso_size &&
+ skb_headlen(skb) > header_len;
+}
static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
-@@ -1619,6 +1769,7 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1619,6 +1772,7 @@ static netdev_tx_t mtk_start_xmit(struct
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *segs, *next;
bool gso = false;
int tx_num;
-@@ -1647,37 +1798,42 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1647,37 +1801,42 @@ static netdev_tx_t mtk_start_xmit(struct
return NETDEV_TX_BUSY;
}
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -2302,7 +2302,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2305,7 +2305,7 @@ static int mtk_poll_rx(struct napi_struc
if (ret != XDP_PASS)
goto skip_rx;
if (unlikely(!skb)) {
page_pool_put_full_page(ring->page_pool,
page, true);
-@@ -2340,7 +2340,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2343,7 +2343,7 @@ static int mtk_poll_rx(struct napi_struc
dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
ring->buf_size, DMA_FROM_DEVICE);
/* QDMA Flow Control Register */
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3473,12 +3473,14 @@ static int mtk_start_dma(struct mtk_eth
+@@ -3476,12 +3476,14 @@ static int mtk_start_dma(struct mtk_eth
MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
.mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
-@@ -3581,6 +3726,9 @@ static int mtk_open(struct net_device *d
+@@ -3584,6 +3729,9 @@ static int mtk_open(struct net_device *d
ppe_num = eth->soc->ppe_num;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
-@@ -3728,6 +3876,9 @@ static int mtk_stop(struct net_device *d
+@@ -3731,6 +3879,9 @@ static int mtk_stop(struct net_device *d
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_stop(eth->ppe[i]);
return 0;
}
-@@ -4818,6 +4969,7 @@ static const struct net_device_ops mtk_n
+@@ -4821,6 +4972,7 @@ static const struct net_device_ops mtk_n
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
const __be32 *_id = of_get_property(np, "reg", NULL);
phy_interface_t phy_mode;
struct phylink *phylink;
struct mtk_mac *mac;
-@@ -4856,16 +5008,41 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4859,16 +5011,41 @@ static int mtk_add_mac(struct mtk_eth *e
mac->id = id;
mac->hw = eth;
mac->of_node = np;
}
memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
-@@ -4948,8 +5125,21 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4951,8 +5128,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -5000,6 +5190,26 @@ free_netdev:
+@@ -5003,6 +5193,26 @@ free_netdev:
return err;
}
void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
{
struct net_device *dev, *tmp;
-@@ -5146,7 +5356,8 @@ static int mtk_probe(struct platform_dev
+@@ -5149,7 +5359,8 @@ static int mtk_probe(struct platform_dev
regmap_write(cci, 0, 3);
}
err = mtk_sgmii_init(eth);
if (err)
-@@ -5257,6 +5468,24 @@ static int mtk_probe(struct platform_dev
+@@ -5260,6 +5471,24 @@ static int mtk_probe(struct platform_dev
}
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
err = devm_request_irq(eth->dev, eth->irq[0],
mtk_handle_irq, 0,
-@@ -5367,6 +5596,11 @@ static void mtk_remove(struct platform_d
+@@ -5370,6 +5599,11 @@ static void mtk_remove(struct platform_d
mtk_stop(eth->netdev[i]);
mac = netdev_priv(eth->netdev[i]);
phylink_disconnect_phy(mac->phylink);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -5634,7 +5634,7 @@ static const struct mtk_soc_data mt2701_
+@@ -5637,7 +5637,7 @@ static const struct mtk_soc_data mt2701_
DESC_SIZE(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5662,7 +5662,7 @@ static const struct mtk_soc_data mt7621_
+@@ -5665,7 +5665,7 @@ static const struct mtk_soc_data mt7621_
DESC_SIZE(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5692,7 +5692,7 @@ static const struct mtk_soc_data mt7622_
+@@ -5695,7 +5695,7 @@ static const struct mtk_soc_data mt7622_
DESC_SIZE(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5721,7 +5721,7 @@ static const struct mtk_soc_data mt7623_
+@@ -5724,7 +5724,7 @@ static const struct mtk_soc_data mt7623_
DESC_SIZE(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5747,7 +5747,7 @@ static const struct mtk_soc_data mt7629_
+@@ -5750,7 +5750,7 @@ static const struct mtk_soc_data mt7629_
DESC_SIZE(struct mtk_rx_dma),
.irq_done_mask = MTK_RX_DONE_INT,
.dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
-@@ -5779,7 +5779,7 @@ static const struct mtk_soc_data mt7981_
+@@ -5782,7 +5782,7 @@ static const struct mtk_soc_data mt7981_
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
-@@ -5809,7 +5809,7 @@ static const struct mtk_soc_data mt7986_
+@@ -5812,7 +5812,7 @@ static const struct mtk_soc_data mt7986_
.dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
};
-@@ -5862,7 +5862,7 @@ static const struct mtk_soc_data rt5350_
+@@ -5865,7 +5865,7 @@ static const struct mtk_soc_data rt5350_
.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4737,6 +4737,7 @@ static int mtk_get_sset_count(struct net
+@@ -4740,6 +4740,7 @@ static int mtk_get_sset_count(struct net
static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
{
struct page_pool_stats stats = {};
int i;
-@@ -4749,6 +4750,7 @@ static void mtk_ethtool_pp_stats(struct
+@@ -4752,6 +4753,7 @@ static void mtk_ethtool_pp_stats(struct
page_pool_get_stats(ring->page_pool, &stats);
}
page_pool_ethtool_stats_get(data, &stats);