1 From 19e47fc2aeda3a657c4f64144ffd6e65f7a66601 Mon Sep 17 00:00:00 2001
2 From: Lorenzo Bianconi <lorenzo@kernel.org>
3 Date: Thu, 1 Aug 2024 16:35:05 +0200
4 Subject: [PATCH 3/8] net: airoha: Move irq_mask in airoha_qdma structure
6 QDMA controllers have independent irq lines, so move irqmask in
7 airoha_qdma structure. This is a preliminary patch to support multiple
8 QDMA controllers.
10 Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
11 Link: https://patch.msgid.link/1c8a06e8be605278a7b2f3cd8ac06e74bf5ebf2b.1722522582.git.lorenzo@kernel.org
12 Signed-off-by: Jakub Kicinski <kuba@kernel.org>
14 drivers/net/ethernet/mediatek/airoha_eth.c | 84 +++++++++++-----------
15 1 file changed, 42 insertions(+), 42 deletions(-)
17 --- a/drivers/net/ethernet/mediatek/airoha_eth.c
18 +++ b/drivers/net/ethernet/mediatek/airoha_eth.c
19 @@ -786,6 +786,11 @@ struct airoha_hw_stats {
23 + /* protect concurrent irqmask accesses */
24 + spinlock_t irq_lock;
25 + u32 irqmask[QDMA_INT_REG_MAX];
28 struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
30 struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
31 @@ -812,11 +817,6 @@ struct airoha_eth {
33 void __iomem *fe_regs;
35 - /* protect concurrent irqmask accesses */
36 - spinlock_t irq_lock;
37 - u32 irqmask[QDMA_INT_REG_MAX];
40 struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
41 struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
43 @@ -866,38 +866,37 @@ static u32 airoha_rmw(void __iomem *base
44 #define airoha_qdma_clear(qdma, offset, val) \
45 airoha_rmw((qdma)->regs, (offset), (val), 0)
47 -static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
48 +static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
53 - if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
54 + if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
57 - spin_lock_irqsave(&eth->irq_lock, flags);
58 + spin_lock_irqsave(&qdma->irq_lock, flags);
60 - eth->irqmask[index] &= ~clear;
61 - eth->irqmask[index] |= set;
62 - airoha_qdma_wr(&eth->qdma[0], REG_INT_ENABLE(index),
63 - eth->irqmask[index]);
64 + qdma->irqmask[index] &= ~clear;
65 + qdma->irqmask[index] |= set;
66 + airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
67 /* Read irq_enable register in order to guarantee the update above
68 * completes in the spinlock critical section.
70 - airoha_qdma_rr(&eth->qdma[0], REG_INT_ENABLE(index));
71 + airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
73 - spin_unlock_irqrestore(&eth->irq_lock, flags);
74 + spin_unlock_irqrestore(&qdma->irq_lock, flags);
77 -static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
78 +static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
81 - airoha_qdma_set_irqmask(eth, index, 0, mask);
82 + airoha_qdma_set_irqmask(qdma, index, 0, mask);
85 -static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
86 +static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
89 - airoha_qdma_set_irqmask(eth, index, mask, 0);
90 + airoha_qdma_set_irqmask(qdma, index, mask, 0);
93 static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
94 @@ -1522,7 +1521,7 @@ static int airoha_qdma_rx_process(struct
95 static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
97 struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
98 - struct airoha_eth *eth = q->eth;
99 + struct airoha_qdma *qdma = &q->eth->qdma[0];
103 @@ -1531,7 +1530,7 @@ static int airoha_qdma_rx_napi_poll(stru
104 } while (cur && done < budget);
106 if (done < budget && napi_complete(napi))
107 - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
108 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1,
112 @@ -1719,7 +1718,7 @@ static int airoha_qdma_tx_napi_poll(stru
115 if (done < budget && napi_complete(napi))
116 - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
117 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
118 TX_DONE_INT_MASK(id));
121 @@ -1928,13 +1927,13 @@ static int airoha_qdma_hw_init(struct ai
124 /* clear pending irqs */
125 - for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
126 + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
127 airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
130 - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
131 - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
132 - airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
133 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
134 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
135 + airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
137 /* setup irq binding */
138 for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
139 @@ -1980,14 +1979,13 @@ static int airoha_qdma_hw_init(struct ai
140 static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
142 struct airoha_eth *eth = dev_instance;
143 - u32 intr[ARRAY_SIZE(eth->irqmask)];
144 - struct airoha_qdma *qdma;
145 + struct airoha_qdma *qdma = &eth->qdma[0];
146 + u32 intr[ARRAY_SIZE(qdma->irqmask)];
149 - qdma = &eth->qdma[0];
150 - for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
151 + for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
152 intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
153 - intr[i] &= eth->irqmask[i];
154 + intr[i] &= qdma->irqmask[i];
155 airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
158 @@ -1995,7 +1993,7 @@ static irqreturn_t airoha_irq_handler(in
161 if (intr[1] & RX_DONE_INT_MASK) {
162 - airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
163 + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
166 for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
167 @@ -2015,7 +2013,7 @@ static irqreturn_t airoha_irq_handler(in
168 if (!(intr[0] & TX_DONE_INT_MASK(i)))
171 - airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
172 + airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
173 TX_DONE_INT_MASK(i));
175 status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(i));
176 @@ -2030,12 +2028,18 @@ static irqreturn_t airoha_irq_handler(in
180 -static int airoha_qdma_init(struct airoha_eth *eth)
181 +static int airoha_qdma_init(struct platform_device *pdev,
182 + struct airoha_eth *eth)
184 struct airoha_qdma *qdma = &eth->qdma[0];
187 - err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
188 + spin_lock_init(&qdma->irq_lock);
189 + qdma->irq = platform_get_irq(pdev, 0);
193 + err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
194 IRQF_SHARED, KBUILD_MODNAME, eth);
197 @@ -2061,7 +2065,8 @@ static int airoha_qdma_init(struct airoh
201 -static int airoha_hw_init(struct airoha_eth *eth)
202 +static int airoha_hw_init(struct platform_device *pdev,
203 + struct airoha_eth *eth)
207 @@ -2077,7 +2082,7 @@ static int airoha_hw_init(struct airoha_
211 - return airoha_qdma_init(eth);
212 + return airoha_qdma_init(pdev, eth);
215 static void airoha_hw_cleanup(struct airoha_eth *eth)
216 @@ -2674,11 +2679,6 @@ static int airoha_probe(struct platform_
220 - spin_lock_init(&eth->irq_lock);
221 - eth->irq = platform_get_irq(pdev, 0);
225 eth->napi_dev = alloc_netdev_dummy(0);
228 @@ -2688,7 +2688,7 @@ static int airoha_probe(struct platform_
229 strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
230 platform_set_drvdata(pdev, eth);
232 - err = airoha_hw_init(eth);
233 + err = airoha_hw_init(pdev, eth);