b73fc34ef1beda13f4ee256263b544510c3ef25d
[openwrt/staging/thess.git] /
1 From 9a2500ab22f059e596942172a8e4a60ae8243ce4 Mon Sep 17 00:00:00 2001
2 From: Lorenzo Bianconi <lorenzo@kernel.org>
3 Date: Thu, 1 Aug 2024 16:35:06 +0200
4 Subject: [PATCH 4/8] net: airoha: Add airoha_qdma pointer in
5 airoha_tx_irq_queue/airoha_queue structures
6
7 Move the airoha_eth pointer from the airoha_tx_irq_queue/airoha_queue
8 structures into the airoha_qdma structure. This is a preliminary patch to
9 introduce support for the multiple QDMA controllers available on EN7581.
10
11 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
12 Link: https://patch.msgid.link/074565b82fd0ceefe66e186f21133d825dbd48eb.1722522582.git.lorenzo@kernel.org
13 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
14 ---
15 drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
16 1 file changed, 41 insertions(+), 43 deletions(-)
17
18 --- a/drivers/net/ethernet/mediatek/airoha_eth.c
19 +++ b/drivers/net/ethernet/mediatek/airoha_eth.c
20 @@ -728,7 +728,7 @@ struct airoha_queue_entry {
21 };
22
23 struct airoha_queue {
24 - struct airoha_eth *eth;
25 + struct airoha_qdma *qdma;
26
27 /* protect concurrent queue accesses */
28 spinlock_t lock;
29 @@ -747,7 +747,7 @@ struct airoha_queue {
30 };
31
32 struct airoha_tx_irq_queue {
33 - struct airoha_eth *eth;
34 + struct airoha_qdma *qdma;
35
36 struct napi_struct napi;
37 u32 *q;
38 @@ -784,6 +784,7 @@ struct airoha_hw_stats {
39 };
40
41 struct airoha_qdma {
42 + struct airoha_eth *eth;
43 void __iomem *regs;
44
45 /* protect concurrent irqmask accesses */
46 @@ -1388,8 +1389,8 @@ static int airoha_fe_init(struct airoha_
47 static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
48 {
49 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
50 - struct airoha_qdma *qdma = &q->eth->qdma[0];
51 - struct airoha_eth *eth = q->eth;
52 + struct airoha_qdma *qdma = q->qdma;
53 + struct airoha_eth *eth = qdma->eth;
54 int qid = q - &qdma->q_rx[0];
55 int nframes = 0;
56
57 @@ -1457,8 +1458,8 @@ static int airoha_qdma_get_gdm_port(stru
58 static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
59 {
60 enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
61 - struct airoha_qdma *qdma = &q->eth->qdma[0];
62 - struct airoha_eth *eth = q->eth;
63 + struct airoha_qdma *qdma = q->qdma;
64 + struct airoha_eth *eth = qdma->eth;
65 int qid = q - &qdma->q_rx[0];
66 int done = 0;
67
68 @@ -1521,7 +1522,6 @@ static int airoha_qdma_rx_process(struct
69 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
70 {
71 struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
72 - struct airoha_qdma *qdma = &q->eth->qdma[0];
73 int cur, done = 0;
74
75 do {
76 @@ -1530,14 +1530,13 @@ static int airoha_qdma_rx_napi_poll(stru
77 } while (cur && done < budget);
78
79 if (done < budget && napi_complete(napi))
80 - airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
81 + airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
82 RX_DONE_INT_MASK);
83
84 return done;
85 }
86
87 -static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
88 - struct airoha_queue *q,
89 +static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
90 struct airoha_qdma *qdma, int ndesc)
91 {
92 const struct page_pool_params pp_params = {
93 @@ -1548,15 +1547,16 @@ static int airoha_qdma_init_rx_queue(str
94 .dma_dir = DMA_FROM_DEVICE,
95 .max_len = PAGE_SIZE,
96 .nid = NUMA_NO_NODE,
97 - .dev = eth->dev,
98 + .dev = qdma->eth->dev,
99 .napi = &q->napi,
100 };
101 + struct airoha_eth *eth = qdma->eth;
102 int qid = q - &qdma->q_rx[0], thr;
103 dma_addr_t dma_addr;
104
105 q->buf_size = PAGE_SIZE / 2;
106 q->ndesc = ndesc;
107 - q->eth = eth;
108 + q->qdma = qdma;
109
110 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
111 GFP_KERNEL);
112 @@ -1596,7 +1596,7 @@ static int airoha_qdma_init_rx_queue(str
113
114 static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
115 {
116 - struct airoha_eth *eth = q->eth;
117 + struct airoha_eth *eth = q->qdma->eth;
118
119 while (q->queued) {
120 struct airoha_queue_entry *e = &q->entry[q->tail];
121 @@ -1610,8 +1610,7 @@ static void airoha_qdma_cleanup_rx_queue
122 }
123 }
124
125 -static int airoha_qdma_init_rx(struct airoha_eth *eth,
126 - struct airoha_qdma *qdma)
127 +static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
128 {
129 int i;
130
131 @@ -1623,8 +1622,8 @@ static int airoha_qdma_init_rx(struct ai
132 continue;
133 }
134
135 - err = airoha_qdma_init_rx_queue(eth, &qdma->q_rx[i],
136 - qdma, RX_DSCP_NUM(i));
137 + err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
138 + RX_DSCP_NUM(i));
139 if (err)
140 return err;
141 }
142 @@ -1640,9 +1639,9 @@ static int airoha_qdma_tx_napi_poll(stru
143 int id, done = 0;
144
145 irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
146 - eth = irq_q->eth;
147 - qdma = &eth->qdma[0];
148 + qdma = irq_q->qdma;
149 id = irq_q - &qdma->q_tx_irq[0];
150 + eth = qdma->eth;
151
152 while (irq_q->queued > 0 && done < budget) {
153 u32 qid, last, val = irq_q->q[irq_q->head];
154 @@ -1724,16 +1723,16 @@ static int airoha_qdma_tx_napi_poll(stru
155 return done;
156 }
157
158 -static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
159 - struct airoha_queue *q,
160 +static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
161 struct airoha_qdma *qdma, int size)
162 {
163 + struct airoha_eth *eth = qdma->eth;
164 int i, qid = q - &qdma->q_tx[0];
165 dma_addr_t dma_addr;
166
167 spin_lock_init(&q->lock);
168 q->ndesc = size;
169 - q->eth = eth;
170 + q->qdma = qdma;
171 q->free_thr = 1 + MAX_SKB_FRAGS;
172
173 q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
174 @@ -1762,11 +1761,11 @@ static int airoha_qdma_init_tx_queue(str
175 return 0;
176 }
177
178 -static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
179 - struct airoha_tx_irq_queue *irq_q,
180 +static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
181 struct airoha_qdma *qdma, int size)
182 {
183 int id = irq_q - &qdma->q_tx_irq[0];
184 + struct airoha_eth *eth = qdma->eth;
185 dma_addr_t dma_addr;
186
187 netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
188 @@ -1778,7 +1777,7 @@ static int airoha_qdma_tx_irq_init(struc
189
190 memset(irq_q->q, 0xff, size * sizeof(u32));
191 irq_q->size = size;
192 - irq_q->eth = eth;
193 + irq_q->qdma = qdma;
194
195 airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
196 airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
197 @@ -1789,21 +1788,20 @@ static int airoha_qdma_tx_irq_init(struc
198 return 0;
199 }
200
201 -static int airoha_qdma_init_tx(struct airoha_eth *eth,
202 - struct airoha_qdma *qdma)
203 +static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
204 {
205 int i, err;
206
207 for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
208 - err = airoha_qdma_tx_irq_init(eth, &qdma->q_tx_irq[i],
209 - qdma, IRQ_QUEUE_LEN(i));
210 + err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
211 + IRQ_QUEUE_LEN(i));
212 if (err)
213 return err;
214 }
215
216 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
217 - err = airoha_qdma_init_tx_queue(eth, &qdma->q_tx[i],
218 - qdma, TX_DSCP_NUM);
219 + err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
220 + TX_DSCP_NUM);
221 if (err)
222 return err;
223 }
224 @@ -1813,7 +1811,7 @@ static int airoha_qdma_init_tx(struct ai
225
226 static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
227 {
228 - struct airoha_eth *eth = q->eth;
229 + struct airoha_eth *eth = q->qdma->eth;
230
231 spin_lock_bh(&q->lock);
232 while (q->queued) {
233 @@ -1830,9 +1828,9 @@ static void airoha_qdma_cleanup_tx_queue
234 spin_unlock_bh(&q->lock);
235 }
236
237 -static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth,
238 - struct airoha_qdma *qdma)
239 +static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
240 {
241 + struct airoha_eth *eth = qdma->eth;
242 dma_addr_t dma_addr;
243 u32 status;
244 int size;
245 @@ -1870,8 +1868,7 @@ static int airoha_qdma_init_hfwd_queues(
246 REG_LMGR_INIT_CFG);
247 }
248
249 -static void airoha_qdma_init_qos(struct airoha_eth *eth,
250 - struct airoha_qdma *qdma)
251 +static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
252 {
253 airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
254 airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
255 @@ -1921,8 +1918,7 @@ static void airoha_qdma_init_qos(struct
256 FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
257 }
258
259 -static int airoha_qdma_hw_init(struct airoha_eth *eth,
260 - struct airoha_qdma *qdma)
261 +static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
262 {
263 int i;
264
265 @@ -1959,7 +1955,7 @@ static int airoha_qdma_hw_init(struct ai
266 GLOBAL_CFG_TX_WB_DONE_MASK |
267 FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
268
269 - airoha_qdma_init_qos(eth, qdma);
270 + airoha_qdma_init_qos(qdma);
271
272 /* disable qdma rx delay interrupt */
273 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
274 @@ -2035,6 +2031,8 @@ static int airoha_qdma_init(struct platf
275 int err;
276
277 spin_lock_init(&qdma->irq_lock);
278 + qdma->eth = eth;
279 +
280 qdma->irq = platform_get_irq(pdev, 0);
281 if (qdma->irq < 0)
282 return qdma->irq;
283 @@ -2044,19 +2042,19 @@ static int airoha_qdma_init(struct platf
284 if (err)
285 return err;
286
287 - err = airoha_qdma_init_rx(eth, qdma);
288 + err = airoha_qdma_init_rx(qdma);
289 if (err)
290 return err;
291
292 - err = airoha_qdma_init_tx(eth, qdma);
293 + err = airoha_qdma_init_tx(qdma);
294 if (err)
295 return err;
296
297 - err = airoha_qdma_init_hfwd_queues(eth, qdma);
298 + err = airoha_qdma_init_hfwd_queues(qdma);
299 if (err)
300 return err;
301
302 - err = airoha_qdma_hw_init(eth, qdma);
303 + err = airoha_qdma_hw_init(qdma);
304 if (err)
305 return err;
306