From 9a2500ab22f059e596942172a8e4a60ae8243ce4 Mon Sep 17 00:00:00 2001
From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Thu, 1 Aug 2024 16:35:06 +0200
Subject: [PATCH 4/8] net: airoha: Add airoha_qdma pointer in
 airoha_tx_irq_queue/airoha_queue structures

Move the airoha_eth pointer from the airoha_tx_irq_queue/airoha_queue
structures into the airoha_qdma one. This is a preliminary patch to
introduce support for the multiple QDMA controllers available on
EN7581.

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://patch.msgid.link/074565b82fd0ceefe66e186f21133d825dbd48eb.1722522582.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
 1 file changed, 41 insertions(+), 43 deletions(-)
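
For context, the ownership chain that results from this patch is sketched
below (a field subset only; the full definitions live in airoha_eth.c and
the elided members are indicated by comments). Each queue now points back
to its owning QDMA controller, and the controller carries the airoha_eth
back-pointer, so per-queue code that used to read q->eth now derives it
as q->qdma->eth:

	struct airoha_eth;

	struct airoha_qdma {
		struct airoha_eth *eth;   /* back-pointer added by this patch */
		/* ... register base, irq state, q_tx/q_rx/q_tx_irq arrays ... */
	};

	struct airoha_queue {
		struct airoha_qdma *qdma; /* was: struct airoha_eth *eth */
		/* ... descriptor ring state ... */
	};

	struct airoha_tx_irq_queue {
		struct airoha_qdma *qdma; /* was: struct airoha_eth *eth */
		/* ... completion ring state ... */
	};

With a dedicated airoha_qdma instance per controller, the queue helpers
below no longer need an explicit airoha_eth argument, which is what lets
EN7581's multiple QDMA blocks be instantiated independently later in the
series.
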
--- a/drivers/net/ethernet/mediatek/airoha_eth.c
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -728,7 +728,7 @@ struct airoha_queue_entry {
-	struct airoha_eth *eth;
+	struct airoha_qdma *qdma;
 
 	/* protect concurrent queue accesses */
@@ -747,7 +747,7 @@ struct airoha_queue {
 struct airoha_tx_irq_queue {
-	struct airoha_eth *eth;
+	struct airoha_qdma *qdma;
 
 	struct napi_struct napi;
@@ -784,6 +784,7 @@ struct airoha_hw_stats {
+	struct airoha_eth *eth;
 	/* protect concurrent irqmask accesses */
@@ -1388,8 +1389,8 @@ static int airoha_fe_init(struct airoha_
 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
 {
 	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-	struct airoha_qdma *qdma = &q->eth->qdma[0];
-	struct airoha_eth *eth = q->eth;
+	struct airoha_qdma *qdma = q->qdma;
+	struct airoha_eth *eth = qdma->eth;
 	int qid = q - &qdma->q_rx[0];
@@ -1457,8 +1458,8 @@ static int airoha_qdma_get_gdm_port(stru
 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
 {
 	enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
-	struct airoha_qdma *qdma = &q->eth->qdma[0];
-	struct airoha_eth *eth = q->eth;
+	struct airoha_qdma *qdma = q->qdma;
+	struct airoha_eth *eth = qdma->eth;
 	int qid = q - &qdma->q_rx[0];
@@ -1521,7 +1522,6 @@ static int airoha_qdma_rx_process(struct
 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
 {
 	struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
-	struct airoha_qdma *qdma = &q->eth->qdma[0];
@@ -1530,14 +1530,13 @@ static int airoha_qdma_rx_napi_poll(stru
 	} while (cur && done < budget);
 
 	if (done < budget && napi_complete(napi))
-		airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
+		airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
-static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
-				     struct airoha_queue *q,
+static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
 				     struct airoha_qdma *qdma, int ndesc)
 {
 	const struct page_pool_params pp_params = {
@@ -1548,15 +1547,16 @@ static int airoha_qdma_init_rx_queue(str
 		.dma_dir = DMA_FROM_DEVICE,
+		.dev = qdma->eth->dev,
+	struct airoha_eth *eth = qdma->eth;
 	int qid = q - &qdma->q_rx[0], thr;
 	q->buf_size = PAGE_SIZE / 2;
 	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
@@ -1596,7 +1596,7 @@ static int airoha_qdma_init_rx_queue(str
 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
 {
-	struct airoha_eth *eth = q->eth;
+	struct airoha_eth *eth = q->qdma->eth;
 		struct airoha_queue_entry *e = &q->entry[q->tail];
@@ -1610,8 +1610,7 @@ static void airoha_qdma_cleanup_rx_queue
-static int airoha_qdma_init_rx(struct airoha_eth *eth,
-			       struct airoha_qdma *qdma)
+static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
@@ -1623,8 +1622,8 @@ static int airoha_qdma_init_rx(struct ai
-		err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
-						qdma, RX_DSCP_NUM(i));
+		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
@@ -1640,9 +1639,9 @@ static int airoha_qdma_tx_napi_poll(stru
 	irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
-	qdma = &eth->qdma[0];
+	qdma = irq_q->qdma;
 	id = irq_q - &qdma->q_tx_irq[0];
 	while (irq_q->queued > 0 && done < budget) {
 		u32 qid, last, val = irq_q->q[irq_q->head];
@@ -1724,16 +1723,16 @@ static int airoha_qdma_tx_napi_poll(stru
-static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
-				     struct airoha_queue *q,
+static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
 				     struct airoha_qdma *qdma, int size)
 {
+	struct airoha_eth *eth = qdma->eth;
 	int i, qid = q - &qdma->q_tx[0];
 	spin_lock_init(&q->lock);
 	q->free_thr = 1 + MAX_SKB_FRAGS;
 
 	q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
@@ -1762,11 +1761,11 @@ static int airoha_qdma_init_tx_queue(str
-static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
-				   struct airoha_tx_irq_queue *irq_q,
+static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
 				   struct airoha_qdma *qdma, int size)
 {
 	int id = irq_q - &qdma->q_tx_irq[0];
+	struct airoha_eth *eth = qdma->eth;
 	netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
@@ -1778,7 +1777,7 @@ static int airoha_qdma_tx_irq_init(struc
 	memset(irq_q->q, 0xff, size * sizeof(u32));
+	irq_q->qdma = qdma;
 
 	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
 	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
@@ -1789,21 +1788,20 @@ static int airoha_qdma_tx_irq_init(struc
-static int airoha_qdma_init_tx(struct airoha_eth *eth,
-			       struct airoha_qdma *qdma)
+static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
-		err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
-					      qdma, IRQ_QUEUE_LEN(i));
+		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
 	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
-		err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
-						qdma, TX_DSCP_NUM);
+		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
@@ -1813,7 +1811,7 @@ static int airoha_qdma_init_tx(struct ai
 static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
 {
-	struct airoha_eth *eth = q->eth;
+	struct airoha_eth *eth = q->qdma->eth;
 
 	spin_lock_bh(&q->lock);
@@ -1830,9 +1828,9 @@ static void airoha_qdma_cleanup_tx_queue
 	spin_unlock_bh(&q->lock);
-static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
-					struct airoha_qdma *qdma)
+static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
 {
+	struct airoha_eth *eth = qdma->eth;
@@ -1870,8 +1868,7 @@ static int airoha_qdma_init_hfwd_queues(
-static void airoha_qdma_init_qos(struct airoha_eth *eth,
-				 struct airoha_qdma *qdma)
+static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
 {
 	airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
 	airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
@@ -1921,8 +1918,7 @@ static void airoha_qdma_init_qos(struct
 			  FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
-static int airoha_qdma_hw_init(struct airoha_eth *eth,
-			       struct airoha_qdma *qdma)
+static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
@@ -1959,7 +1955,7 @@ static int airoha_qdma_hw_init(struct ai
 			   GLOBAL_CFG_TX_WB_DONE_MASK |
 			   FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
 
-	airoha_qdma_init_qos(eth, qdma);
+	airoha_qdma_init_qos(qdma);
 
 	/* disable qdma rx delay interrupt */
 	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
@@ -2035,6 +2031,8 @@ static int airoha_qdma_init(struct platf
 	spin_lock_init(&qdma->irq_lock);
 	qdma->irq = platform_get_irq(pdev, 0);
@@ -2044,19 +2042,19 @@ static int airoha_qdma_init(struct platf
-	err = airoha_qdma_init_rx(eth, qdma);
+	err = airoha_qdma_init_rx(qdma);
-	err = airoha_qdma_init_tx(eth, qdma);
+	err = airoha_qdma_init_tx(qdma);
-	err = airoha_qdma_init_hfwd_queues(eth, qdma);
+	err = airoha_qdma_init_hfwd_queues(qdma);
-	err = airoha_qdma_hw_init(eth, qdma);
+	err = airoha_qdma_hw_init(qdma);
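
The last hunks above show the shape of the converted init path once the
eth argument is dropped: airoha_qdma_init() records the back-pointer on
the controller, and every helper then takes only the qdma handle. A
minimal sketch of that flow follows; the assignment line is an
assumption inferred from the two lines added by the @@ -2035,6 +2031,8
hunk (whose body is elided above), and error unwinding is abbreviated:

	/* sketch: per-controller setup inside airoha_qdma_init() */
	qdma->eth = eth;	/* assumed placement of the new back-pointer */

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	err = airoha_qdma_hw_init(qdma);
	if (err)
		return err;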