1 From patchwork Wed Nov 20 09:15:04 2024
2 Content-Type: text/plain; charset="utf-8"
3 MIME-Version: 1.0
4 Content-Transfer-Encoding: 7bit
5 X-Patchwork-Submitter: Md Sadre Alam <quic_mdalam@quicinc.com>
6 X-Patchwork-Id: 2013501
7 Return-Path:
8 <linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org>
9 X-Original-To: incoming@patchwork.ozlabs.org
10 Delivered-To: patchwork-incoming@legolas.ozlabs.org
11 Authentication-Results: legolas.ozlabs.org;
12 dkim=pass (2048-bit key;
13 secure) header.d=lists.infradead.org header.i=@lists.infradead.org
14 header.a=rsa-sha256 header.s=bombadil.20210309 header.b=EI114Wyi;
15 dkim=fail reason="signature verification failed" (2048-bit key;
16 unprotected) header.d=quicinc.com header.i=@quicinc.com header.a=rsa-sha256
17 header.s=qcppdkim1 header.b=WgJ5on5Q;
18 dkim-atps=neutral
19 Authentication-Results: legolas.ozlabs.org;
20 spf=none (no SPF record) smtp.mailfrom=lists.infradead.org
21 (client-ip=2607:7c80:54:3::133; helo=bombadil.infradead.org;
22 envelope-from=linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org;
23 receiver=patchwork.ozlabs.org)
24 Received: from bombadil.infradead.org (bombadil.infradead.org
25 [IPv6:2607:7c80:54:3::133])
26 (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits)
27 key-exchange X25519 server-signature ECDSA (secp384r1) server-digest SHA384)
28 (No client certificate requested)
29 by legolas.ozlabs.org (Postfix) with ESMTPS id 4XtbMW1JfFz1xyG
30 for <incoming@patchwork.ozlabs.org>; Wed, 20 Nov 2024 20:16:23 +1100 (AEDT)
31 DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
32 d=lists.infradead.org; s=bombadil.20210309; h=Sender:
33 Content-Transfer-Encoding:Content-Type:List-Subscribe:List-Help:List-Post:
34 List-Archive:List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:
35 Message-ID:Date:Subject:CC:To:From:Reply-To:Content-ID:Content-Description:
36 Resent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:
37 List-Owner; bh=HhmgnJkVgny0u3hKiTd0ZM4cwdwqDHQn7f5raP0Z97Q=; b=EI114WyiL0woYU
38 WPjsi5XhHUI2CGLrmf8w0ScC9Pq7PFPwTPGxLYcoSh0JNv/FIax+J93VC5zYRp68aD4mTdTjU6bra
39 kTYGxoBj28Lwiy1jmYm0SWp8dPzX2hCg1lNWr9mP+JjnhuZpyt26qUJnshLyQbVgv8uoEiYGpJjIv
40 GBwOKIvs0vkaJy2E1MBvCO2/+qlNpOlz83wg1tF8uS4MJF+fjWgmVHBrXfRoTlEk6CJumTvpluqNx
41 nzaRMXONO/wYP03WjutalMhZJsQpInGIZhhKL8TOksrLF2q2b4gzF1JAucwPq78p0bWAcgS2ih8vP
42 564SGleqnhz8GQTtUxog==;
43 Received: from localhost ([::1] helo=bombadil.infradead.org)
44 by bombadil.infradead.org with esmtp (Exim 4.98 #2 (Red Hat Linux))
45 id 1tDgom-0000000Esn9-19FP;
46 Wed, 20 Nov 2024 09:16:12 +0000
47 Received: from mx0b-0031df01.pphosted.com ([205.220.180.131])
48 by bombadil.infradead.org with esmtps (Exim 4.98 #2 (Red Hat Linux))
49 id 1tDgoi-0000000EsiZ-1EXE
50 for linux-mtd@lists.infradead.org;
51 Wed, 20 Nov 2024 09:16:10 +0000
52 Received: from pps.filterd (m0279873.ppops.net [127.0.0.1])
53 by mx0a-0031df01.pphosted.com (8.18.1.2/8.18.1.2) with ESMTP id
54 4AK9FRJU010557;
55 Wed, 20 Nov 2024 09:16:00 GMT
56 DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=quicinc.com; h=
57 cc:content-transfer-encoding:content-type:date:from:in-reply-to
58 :message-id:mime-version:references:subject:to; s=qcppdkim1; bh=
59 5iRCp4ZP77j9GA9UB1rMArUv8juFZCi6KdJ0Pw/qPiM=; b=WgJ5on5QibUuTZPB
60 Ps89rgba/1Sz4aAyNPfSD9p3o4Lkpefh8wQBsnNvyMAqm6bP9GqT4GsoLIWzh2iL
61 f+NE+/ukvlaLa7P7MQHd14J0blNY2tD9ooc8NodYJJNu1Ul84oegXRuGSTvqh5h7
62 Xm2jOZDpHs7GIB/opkfbaLyFkMdDmMo8zBjL338260tHaKQf5vc62PNv6I5roF2O
63 QictV4vxOJvogxIcNrtAtGwkgsY8+UKYiK0Kf5J7JV7QpK1ejDCqv1NcNPu/87tp
64 u1a5iWy5iFKPldKgO0B/sAvGc2y8tw3Td8lnu2+CQAdyNMVUnBcB71XGvkrINO+y
65 vVURBw==
66 Received: from nasanppmta02.qualcomm.com (i-global254.qualcomm.com
67 [199.106.103.254])
68 by mx0a-0031df01.pphosted.com (PPS) with ESMTPS id 43091mda79-1
69 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT);
70 Wed, 20 Nov 2024 09:15:59 +0000 (GMT)
71 Received: from nasanex01a.na.qualcomm.com (nasanex01a.na.qualcomm.com
72 [10.52.223.231])
73 by NASANPPMTA02.qualcomm.com (8.18.1.2/8.18.1.2) with ESMTPS id
74 4AK9FwXk025630
75 (version=TLSv1.2 cipher=ECDHE-RSA-AES256-GCM-SHA384 bits=256 verify=NOT);
76 Wed, 20 Nov 2024 09:15:58 GMT
77 Received: from hu-mdalam-blr.qualcomm.com (10.80.80.8) by
78 nasanex01a.na.qualcomm.com (10.52.223.231) with Microsoft SMTP Server
79 (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id
80 15.2.1544.9; Wed, 20 Nov 2024 01:15:53 -0800
81 From: Md Sadre Alam <quic_mdalam@quicinc.com>
82 To: <broonie@kernel.org>, <robh@kernel.org>, <krzk+dt@kernel.org>,
83 <conor+dt@kernel.org>, <andersson@kernel.org>,
84 <konradybcio@kernel.org>, <miquel.raynal@bootlin.com>,
85 <richard@nod.at>, <vigneshr@ti.com>,
86 <manivannan.sadhasivam@linaro.org>, <linux-arm-msm@vger.kernel.org>,
87 <linux-spi@vger.kernel.org>, <devicetree@vger.kernel.org>,
88 <linux-kernel@vger.kernel.org>, <linux-mtd@lists.infradead.org>
89 CC: <quic_srichara@quicinc.com>, <quic_varada@quicinc.com>,
90 <quic_mdalam@quicinc.com>
91 Subject: [PATCH v14 6/8] spi: spi-qpic: add driver for QCOM SPI NAND flash
92 Interface
93 Date: Wed, 20 Nov 2024 14:45:04 +0530
94 Message-ID: <20241120091507.1404368-7-quic_mdalam@quicinc.com>
95 X-Mailer: git-send-email 2.34.1
96 In-Reply-To: <20241120091507.1404368-1-quic_mdalam@quicinc.com>
97 References: <20241120091507.1404368-1-quic_mdalam@quicinc.com>
98 MIME-Version: 1.0
99 X-Originating-IP: [10.80.80.8]
100 X-ClientProxiedBy: nasanex01a.na.qualcomm.com (10.52.223.231) To
101 nasanex01a.na.qualcomm.com (10.52.223.231)
102 X-QCInternal: smtphost
103 X-Proofpoint-Virus-Version: vendor=nai engine=6200 definitions=5800
104 signatures=585085
105 X-Proofpoint-ORIG-GUID: Ma2z9oeQxcw35J8DzEJ64cF8a3mxAzPO
106 X-Proofpoint-GUID: Ma2z9oeQxcw35J8DzEJ64cF8a3mxAzPO
107 X-Proofpoint-Virus-Version: vendor=baseguard
108 engine=ICAP:2.0.293,Aquarius:18.0.1039,Hydra:6.0.680,FMLib:17.12.60.29
109 definitions=2024-09-06_09,2024-09-06_01,2024-09-02_01
110 X-Proofpoint-Spam-Details: rule=outbound_notspam policy=outbound score=0
111 suspectscore=0
112 impostorscore=0 spamscore=0 priorityscore=1501 phishscore=0 malwarescore=0
113 clxscore=1015 bulkscore=0 mlxlogscore=999 lowpriorityscore=0 adultscore=0
114 mlxscore=0 classifier=spam adjust=0 reason=mlx scancount=1
115 engine=8.19.0-2409260000 definitions=main-2411200063
116 X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3
117 X-CRM114-CacheID: sfid-20241120_011608_535555_EC8A73C5
118 X-CRM114-Status: GOOD ( 24.86 )
119 X-Spam-Score: -2.8 (--)
120 X-Spam-Report: Spam detection software,
121 running on the system "bombadil.infradead.org",
122 has NOT identified this incoming email as spam. The original
123 message has been attached to this so you can view it or label
124 similar future email. If you have any questions, see
125 the administrator of that system for details.
126 Content preview: This driver implements support for the SPI-NAND mode of
127 QCOM
128 NAND Flash Interface as a SPI-MEM controller with pipelined ECC
129 capability.
130 Co-developed-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
131 Signed-off-by:
132 Sricharan Ramabadhran <quic_srichara@quicinc.com> Co-developed-by:
133 Varadarajan
134 Narayanan <quic_varada@quicinc.com> Sig [...]
135 Content analysis details: (-2.8 points, 5.0 required)
136 pts rule name description
137 ---- ----------------------
138 --------------------------------------------------
139 0.0 RCVD_IN_VALIDITY_RPBL_BLOCKED RBL: ADMINISTRATOR NOTICE: The query to
140 Validity was blocked. See
141 https://knowledge.validity.com/hc/en-us/articles/20961730681243
142 for more information.
143 [205.220.180.131 listed in
144 bl.score.senderscore.com]
145 0.0 RCVD_IN_VALIDITY_SAFE_BLOCKED RBL: ADMINISTRATOR NOTICE: The query to
146 Validity was blocked. See
147 https://knowledge.validity.com/hc/en-us/articles/20961730681243
148 for more information.
149 [205.220.180.131 listed in
150 sa-accredit.habeas.com]
151 -0.7 RCVD_IN_DNSWL_LOW RBL: Sender listed at https://www.dnswl.org/, low
152 trust
153 [205.220.180.131 listed in list.dnswl.org]
154 0.0 RCVD_IN_VALIDITY_CERTIFIED_BLOCKED RBL: ADMINISTRATOR NOTICE: The
155 query to Validity was blocked. See
156 https://knowledge.validity.com/hc/en-us/articles/20961730681243
157 for more information.
158 [205.220.180.131 listed in
159 sa-trusted.bondedsender.org]
160 -0.0 SPF_PASS SPF: sender matches SPF record
161 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record
162 -0.1 DKIM_VALID Message has at least one valid DKIM or DK
163 signature
164 -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from
165 envelope-from domain
166 -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from
167 author's
168 domain
169 0.1 DKIM_SIGNED Message has a DKIM or DK signature,
170 not necessarily valid
171 -1.9 BAYES_00 BODY: Bayes spam probability is 0 to 1%
172 [score: 0.0000]
173 X-BeenThere: linux-mtd@lists.infradead.org
174 X-Mailman-Version: 2.1.34
175 Precedence: list
176 List-Id: Linux MTD discussion mailing list <linux-mtd.lists.infradead.org>
177 List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mtd>,
178 <mailto:linux-mtd-request@lists.infradead.org?subject=unsubscribe>
179 List-Archive: <http://lists.infradead.org/pipermail/linux-mtd/>
180 List-Post: <mailto:linux-mtd@lists.infradead.org>
181 List-Help: <mailto:linux-mtd-request@lists.infradead.org?subject=help>
182 List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>,
183 <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe>
184 Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org>
185 Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org
186
187 This driver implements support for the SPI-NAND mode of the QCOM NAND Flash
188 Interface as a SPI-MEM controller with pipelined ECC capability.
189
190 Co-developed-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
191 Signed-off-by: Sricharan Ramabadhran <quic_srichara@quicinc.com>
192 Co-developed-by: Varadarajan Narayanan <quic_varada@quicinc.com>
193 Signed-off-by: Varadarajan Narayanan <quic_varada@quicinc.com>
194 Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
195 ---
196
197 Change in [v14]
198
199 * No Change
200
201 Change in [v13]
202
203 * Changed return type of qcom_spi_cmd_mapping() from u32 to
204 int to fix the kernel test bot warning
205 * Changed type of variable cmd in qcom_spi_write_page() from u32
206 to int
207 * Removed unused variable s_op from qcom_spi_write_page()
208 * Updated return value variable type from u32 to int in
209 qcom_spi_send_cmdaddr()
210
211 Change in [v12]
212
213 * Added obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o in Makefile
214 to build qpic_common.c based on CONFIG_SPI_QPIC_SNAND
215
216 Change in [v11]
217
218 * Fixed build error reported by kernel test bot
219 * Changed "depends on MTD" to "select MTD" in
220 drivers/spi/Kconfig file
221
222 Change in [v10]
223
224 * Fixed compilation warnings reported by kernel test robot.
225 * Added depends on CONFIG_MTD
226  * Removed extra bracket from statement if (i == (num_cw - 1)) in
227    qcom_spi_program_raw() API.
228
229 Change in [v9]
230
231  * Changed data type of addr1, addr2 and cmd to __le32 in qpic_spi_nand
232 structure
233 * In qcom_spi_set_read_loc_first() api added cpu_to_le32() macro to fix
234 compilation warning
235 * In qcom_spi_set_read_loc_last() api added cpu_to_le32() macro to fix
236 compilation warning
237 * In qcom_spi_init() api added cpu_to_le32() macro to fix compilation
238 warning
239 * In qcom_spi_ecc_init_ctx_pipelined() api removed unused variables
240 reqs, user, step_size, strength and added cpu_to_le32() macro as well
241 to fix compilation warning
242 * In qcom_spi_read_last_cw() api added cpu_to_le32() macro to fix compilation
243 warning
244 * In qcom_spi_check_error() api added cpu_to_le32() macro to fix compilation
245 warning
246 * In qcom_spi_read_page_ecc() api added cpu_to_le32() macro to fix compilation
247 warning
248 * In qcom_spi_read_page_oob() api added cpu_to_le32() macro to fix compilation
249 warning
250 * In qcom_spi_program_raw() api added cpu_to_le32() macro to fix compilation
251 warning
252 * In qcom_spi_program_ecc() api added cpu_to_le32() macro to fix compilation
253 warning
254 * In qcom_spi_program_oob() api added cpu_to_le32() macro to fix compilation
255 warning
256 * In qcom_spi_send_cmdaddr() api added cpu_to_le32() macro to fix compilation
257 warning
258 * In qcom_spi_io_op() api added cpu_to_le32() macro to fix compilation
259 warning
260
261 Change in [v8]
262
263  * Included "bitfield.h" in spi-qpic-snand.c
264    to fix a compilation warning reported by the kernel test robot
265 * Removed unused variable "steps" in
266 qcom_spi_ecc_init_ctx_pipelined() to fix compilation warning
267
268 Change in [v7]
269
270 * Added read_oob() and write_oob() api
271
272 * Handled offset value for oob layout
273
274  * Made CONFIG_SPI_QPIC_SNAND a bool
275
276 * Added macro ecceng_to_qspi()
277
278  * Added FIELD_PREP() macro in SPI init (see the sketch after this changelog)
279
280 * Added else condition in
281 qcom_spi_ecc_finish_io_req_pipelined()
282    for corrected ECC
283
284  * Handled multiple error conditions for the
285    qcom_spi_cmd_mapping() API
286
287  * Fixed a typo in a debug message print
288
289 Change in [v6]
290
291 * Added separate qpic_spi_nand{...} struct
292
293  * Moved the qpic_ecc and qcom_ecc_stats structs to
294    spi-qpic-snand.c, since they are SPI NAND
295    specific
296
297 * Added FIELD_PREP() and GENMASK() macro
298
299 * Removed rawnand.h and partition.h from
300 spi-qpic-snand.c
301
302  * Removed oob_buff assignment from
303 qcom_spi_write_page_cache
304
305 * Added qcom_nand_unalloc() in remove() path
306
307  * Fixed all review comments
308
309 Change in [v5]
310
311 * Added raw_read() and raw_write() api
312
313 * Updated commit message
314
315 * Removed register indirection
316
317  * Added qcom_spi_ prefix to all the APIs
318
319 * Removed snand_set_reg() api.
320
321 * Fixed nandbiterr issue
322
323 * Removed hardcoded num_cw and made it variable
324
325 * Removed hardcoded value for mtd pagesize
326
327  * Added -EOPNOTSUPP in cmd mapping for unsupported
328 commands
329
330  * Replaced if..else with a switch..case statement
331
332 Change in [v4]
333
334 * No change
335
336 Change in [v3]
337
338 * Set SPI_QPIC_SNAND to n and added COMPILE_TEST in Kconfig
339
340  * Sorted the driver entry in the Makefile
341
342  * Made comments C++ style
343
344  * Changed macros to functions: snandc_set_read_loc_last()
345 and snandc_set_read_loc_first()
346
347 * Added error handling in snandc_set_reg()
348
349  * Changed return snandc->ecc_stats.failed ? -EBADMSG :
350    snandc->ecc_stats.bitflips; into a normal
351    conditional statement
352
353  * Removed the cast of wbuf in the qpic_snand_program_execute()
354 function
355
356  * Made num_cw a variable instead of a hardcoded value
357
358  * Changed the if..else condition in qpic_snand_io_op()
359    to a switch..case statement
360
361  * Used the __devm_spi_alloc_controller() API instead of
362 devm_spi_alloc_master()
363
364  * Disabled the clock in the remove path
365
366 Change in [v2]
367
368 * Added initial support for SPI-NAND driver
369
370 Change in [v1]
371
372 * Added RFC patch for design review
373
374 drivers/mtd/nand/Makefile | 4 +
375 drivers/spi/Kconfig | 9 +
376 drivers/spi/Makefile | 1 +
377 drivers/spi/spi-qpic-snand.c | 1633 ++++++++++++++++++++++++++
378 include/linux/mtd/nand-qpic-common.h | 7 +
379 5 files changed, 1654 insertions(+)
380 create mode 100644 drivers/spi/spi-qpic-snand.c
381
382 --- a/drivers/mtd/nand/Makefile
383 +++ b/drivers/mtd/nand/Makefile
384 @@ -4,7 +4,11 @@ nandcore-objs := core.o bbt.o
385 obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o
386 obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o
387 obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o
388 +ifeq ($(CONFIG_SPI_QPIC_SNAND),y)
389 +obj-$(CONFIG_SPI_QPIC_SNAND) += qpic_common.o
390 +else
391 obj-$(CONFIG_MTD_NAND_QCOM) += qpic_common.o
392 +endif
393 obj-y += onenand/
394 obj-y += raw/
395 obj-y += spi/
396 --- a/drivers/spi/Kconfig
397 +++ b/drivers/spi/Kconfig
398 @@ -870,6 +870,15 @@ config SPI_QCOM_QSPI
399 help
400 QSPI(Quad SPI) driver for Qualcomm QSPI controller.
401
402 +config SPI_QPIC_SNAND
403 + bool "QPIC SNAND controller"
404 + depends on ARCH_QCOM || COMPILE_TEST
405 + select MTD
406 + help
407 + QPIC_SNAND (QPIC SPI NAND) driver for Qualcomm QPIC controller.
408 + The QPIC controller supports both parallel NAND and serial NAND.
409 + This config enables the serial NAND driver for the QPIC controller.
410 +
411 config SPI_QUP
412 tristate "Qualcomm SPI controller with QUP interface"
413 depends on ARCH_QCOM || COMPILE_TEST
414 --- a/drivers/spi/Makefile
415 +++ b/drivers/spi/Makefile
416 @@ -110,6 +110,7 @@ obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-
417 obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
418 obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
419 obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
420 +obj-$(CONFIG_SPI_QPIC_SNAND) += spi-qpic-snand.o
421 obj-$(CONFIG_SPI_QUP) += spi-qup.o
422 obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
423 obj-$(CONFIG_SPI_ROCKCHIP_SFC) += spi-rockchip-sfc.o
424 --- /dev/null
425 +++ b/drivers/spi/spi-qpic-snand.c
426 @@ -0,0 +1,1633 @@
427 +// SPDX-License-Identifier: GPL-2.0
428 +
429 +/*
430 + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
431 + *
432 + * Authors:
433 + * Md Sadre Alam <quic_mdalam@quicinc.com>
434 + * Sricharan R <quic_srichara@quicinc.com>
435 + * Varadarajan Narayanan <quic_varada@quicinc.com>
436 + */
437 +#include <linux/bitops.h>
438 +#include <linux/clk.h>
439 +#include <linux/delay.h>
440 +#include <linux/dmaengine.h>
441 +#include <linux/dma-mapping.h>
442 +#include <linux/dma/qcom_adm.h>
443 +#include <linux/dma/qcom_bam_dma.h>
444 +#include <linux/module.h>
445 +#include <linux/of.h>
446 +#include <linux/platform_device.h>
447 +#include <linux/slab.h>
448 +#include <linux/mtd/nand-qpic-common.h>
449 +#include <linux/mtd/spinand.h>
450 +#include <linux/bitfield.h>
451 +
452 +#define NAND_FLASH_SPI_CFG 0xc0
453 +#define NAND_NUM_ADDR_CYCLES 0xc4
454 +#define NAND_BUSY_CHECK_WAIT_CNT 0xc8
455 +#define NAND_FLASH_FEATURES 0xf64
456 +
457 +/* QSPI NAND config reg bits */
458 +#define LOAD_CLK_CNTR_INIT_EN BIT(28)
459 +#define CLK_CNTR_INIT_VAL_VEC 0x924
460 +#define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16)
461 +#define FEA_STATUS_DEV_ADDR 0xc0
462 +#define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8)
463 +#define SPI_CFG BIT(0)
464 +#define SPI_NUM_ADDR 0xDA4DB
465 +#define SPI_WAIT_CNT 0x10
466 +#define QPIC_QSPI_NUM_CS 1
467 +#define SPI_TRANSFER_MODE_x1 BIT(29)
468 +#define SPI_TRANSFER_MODE_x4 (3 << 29)
469 +#define SPI_WP BIT(28)
470 +#define SPI_HOLD BIT(27)
471 +#define QPIC_SET_FEATURE BIT(31)
472 +
473 +#define SPINAND_RESET 0xff
474 +#define SPINAND_READID 0x9f
475 +#define SPINAND_GET_FEATURE 0x0f
476 +#define SPINAND_SET_FEATURE 0x1f
477 +#define SPINAND_READ 0x13
478 +#define SPINAND_ERASE 0xd8
479 +#define SPINAND_WRITE_EN 0x06
480 +#define SPINAND_PROGRAM_EXECUTE 0x10
481 +#define SPINAND_PROGRAM_LOAD 0x84
482 +
483 +#define ACC_FEATURE 0xe
484 +#define BAD_BLOCK_MARKER_SIZE 0x2
485 +#define OOB_BUF_SIZE 128
486 +#define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng)
487 +struct qpic_snand_op {
488 + u32 cmd_reg;
489 + u32 addr1_reg;
490 + u32 addr2_reg;
491 +};
492 +
493 +struct snandc_read_status {
494 + __le32 snandc_flash;
495 + __le32 snandc_buffer;
496 + __le32 snandc_erased_cw;
497 +};
498 +
499 +/*
500 + * ECC state struct
501 + * @corrected: ECC corrected
502 + * @bitflips: Max bit flip
503 + * @failed: ECC failed
504 + */
505 +struct qcom_ecc_stats {
506 + u32 corrected;
507 + u32 bitflips;
508 + u32 failed;
509 +};
510 +
511 +struct qpic_ecc {
512 + struct device *dev;
513 + int ecc_bytes_hw;
514 + int spare_bytes;
515 + int bbm_size;
516 + int ecc_mode;
517 + int bytes;
518 + int steps;
519 + int step_size;
520 + int strength;
521 + int cw_size;
522 + int cw_data;
523 + u32 cfg0;
524 + u32 cfg1;
525 + u32 cfg0_raw;
526 + u32 cfg1_raw;
527 + u32 ecc_buf_cfg;
528 + u32 ecc_bch_cfg;
529 + u32 clrflashstatus;
530 + u32 clrreadstatus;
531 + bool bch_enabled;
532 +};
533 +
534 +struct qpic_spi_nand {
535 + struct qcom_nand_controller *snandc;
536 + struct spi_controller *ctlr;
537 + struct mtd_info *mtd;
538 + struct clk *iomacro_clk;
539 + struct qpic_ecc *ecc;
540 + struct qcom_ecc_stats ecc_stats;
541 + struct nand_ecc_engine ecc_eng;
542 + u8 *data_buf;
543 + u8 *oob_buf;
544 + u32 wlen;
545 + __le32 addr1;
546 + __le32 addr2;
547 + __le32 cmd;
548 + u32 num_cw;
549 + bool oob_rw;
550 + bool page_rw;
551 + bool raw_rw;
552 +};
553 +
554 +static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc,
555 + int reg, int cw_offset, int read_size,
556 + int is_last_read_loc)
557 +{
558 + __le32 locreg_val;
559 + u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
560 + ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
561 + << READ_LOCATION_LAST));
562 +
563 + locreg_val = cpu_to_le32(val);
564 +
565 + if (reg == NAND_READ_LOCATION_0)
566 + snandc->regs->read_location0 = locreg_val;
567 + else if (reg == NAND_READ_LOCATION_1)
568 + snandc->regs->read_location1 = locreg_val;
569 + else if (reg == NAND_READ_LOCATION_2)
570 + snandc->regs->read_location2 = locreg_val;
571 + else if (reg == NAND_READ_LOCATION_3)
572 + snandc->regs->read_location3 = locreg_val;
573 +}
574 +
575 +static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc,
576 + int reg, int cw_offset, int read_size,
577 + int is_last_read_loc)
578 +{
579 + __le32 locreg_val;
580 + u32 val = (((cw_offset) << READ_LOCATION_OFFSET) |
581 + ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc)
582 + << READ_LOCATION_LAST));
583 +
584 + locreg_val = cpu_to_le32(val);
585 +
586 + if (reg == NAND_READ_LOCATION_LAST_CW_0)
587 + snandc->regs->read_location_last0 = locreg_val;
588 + else if (reg == NAND_READ_LOCATION_LAST_CW_1)
589 + snandc->regs->read_location_last1 = locreg_val;
590 + else if (reg == NAND_READ_LOCATION_LAST_CW_2)
591 + snandc->regs->read_location_last2 = locreg_val;
592 + else if (reg == NAND_READ_LOCATION_LAST_CW_3)
593 + snandc->regs->read_location_last3 = locreg_val;
594 +}
595 +
596 +static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand)
597 +{
598 + struct nand_ecc_engine *eng = nand->ecc.engine;
599 + struct qpic_spi_nand *qspi = ecceng_to_qspi(eng);
600 +
601 + return qspi->snandc;
602 +}
603 +
604 +static int qcom_spi_init(struct qcom_nand_controller *snandc)
605 +{
606 + u32 snand_cfg_val = 0x0;
607 + int ret;
608 +
609 + snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) |
610 + FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) |
611 + FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) |
612 + FIELD_PREP(SPI_CFG, 0);
613 +
614 + snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
615 + snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR);
616 + snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT);
617 +
618 + qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
619 +
620 + snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN;
621 + snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val);
622 +
623 + qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0);
624 +
625 + qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0);
626 + qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1,
627 + NAND_BAM_NEXT_SGL);
628 +
629 + ret = qcom_submit_descs(snandc);
630 + if (ret) {
631 + dev_err(snandc->dev, "failure in submitting spi init descriptor\n");
632 + return ret;
633 + }
634 +
635 + return ret;
636 +}
637 +
638 +static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
639 + struct mtd_oob_region *oobregion)
640 +{
641 + struct nand_device *nand = mtd_to_nanddev(mtd);
642 + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
643 + struct qpic_ecc *qecc = snandc->qspi->ecc;
644 +
645 + if (section > 1)
646 + return -ERANGE;
647 +
648 + oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
649 + oobregion->offset = mtd->oobsize - oobregion->length;
650 +
651 + return 0;
652 +}
653 +
654 +static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
655 + struct mtd_oob_region *oobregion)
656 +{
657 + struct nand_device *nand = mtd_to_nanddev(mtd);
658 + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
659 + struct qpic_ecc *qecc = snandc->qspi->ecc;
660 +
661 + if (section)
662 + return -ERANGE;
663 +
664 + oobregion->length = qecc->steps * 4;
665 + oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size;
666 +
667 + return 0;
668 +}
669 +
670 +static const struct mtd_ooblayout_ops qcom_spi_ooblayout = {
671 + .ecc = qcom_spi_ooblayout_ecc,
672 + .free = qcom_spi_ooblayout_free,
673 +};
674 +
675 +static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
676 +{
677 + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
678 + struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
679 + struct mtd_info *mtd = nanddev_to_mtd(nand);
680 + int cwperpage, bad_block_byte;
681 + struct qpic_ecc *ecc_cfg;
682 +
683 + cwperpage = mtd->writesize / NANDC_STEP_SIZE;
684 + snandc->qspi->num_cw = cwperpage;
685 +
686 + ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL);
687 + if (!ecc_cfg)
688 + return -ENOMEM;
689 + snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize,
690 + GFP_KERNEL);
691 + if (!snandc->qspi->oob_buf)
692 + return -ENOMEM;
693 +
694 + memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize);
695 +
696 + nand->ecc.ctx.priv = ecc_cfg;
697 + snandc->qspi->mtd = mtd;
698 +
699 + ecc_cfg->ecc_bytes_hw = 7;
700 + ecc_cfg->spare_bytes = 4;
701 + ecc_cfg->bbm_size = 1;
702 + ecc_cfg->bch_enabled = true;
703 + ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
704 +
705 + ecc_cfg->steps = 4;
706 + ecc_cfg->strength = 4;
707 + ecc_cfg->step_size = 512;
708 + ecc_cfg->cw_data = 516;
709 + ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
710 + bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
711 +
712 + mtd_set_ooblayout(mtd, &qcom_spi_ooblayout);
713 +
714 + ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
715 + FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) |
716 + FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) |
717 + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
718 + FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) |
719 + FIELD_PREP(STATUS_BFR_READ, 0) |
720 + FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) |
721 + FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes);
722 +
723 + ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
724 + FIELD_PREP(CS_ACTIVE_BSY, 0) |
725 + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) |
726 + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) |
727 + FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
728 + FIELD_PREP(WIDE_FLASH, 0) |
729 + FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled);
730 +
731 + ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) |
732 + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) |
733 + FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) |
734 + FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
735 +
736 + ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) |
737 + FIELD_PREP(CS_ACTIVE_BSY, 0) |
738 + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
739 + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
740 + FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) |
741 + FIELD_PREP(WIDE_FLASH, 0) |
742 + FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
743 +
744 + ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) |
745 + FIELD_PREP(ECC_SW_RESET, 0) |
746 + FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, ecc_cfg->cw_data) |
747 + FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) |
748 + FIELD_PREP(ECC_MODE_MASK, 0) |
749 + FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw);
750 +
751 + ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS;
752 + ecc_cfg->clrflashstatus = FS_READY_BSY_N;
753 + ecc_cfg->clrreadstatus = 0xc0;
754 +
755 + conf->step_size = ecc_cfg->step_size;
756 + conf->strength = ecc_cfg->strength;
757 +
758 + snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET);
759 + snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET);
760 +
761 + dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n",
762 + ecc_cfg->strength, ecc_cfg->step_size);
763 +
764 + return 0;
765 +}
766 +
767 +static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand)
768 +{
769 + struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
770 +
771 + kfree(ecc_cfg);
772 +}
773 +
774 +static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand,
775 + struct nand_page_io_req *req)
776 +{
777 + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
778 + struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand);
779 +
780 + snandc->qspi->ecc = ecc_cfg;
781 + snandc->qspi->raw_rw = false;
782 + snandc->qspi->oob_rw = false;
783 + snandc->qspi->page_rw = false;
784 +
785 + if (req->datalen)
786 + snandc->qspi->page_rw = true;
787 +
788 + if (req->ooblen)
789 + snandc->qspi->oob_rw = true;
790 +
791 + if (req->mode == MTD_OPS_RAW)
792 + snandc->qspi->raw_rw = true;
793 +
794 + return 0;
795 +}
796 +
797 +static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand,
798 + struct nand_page_io_req *req)
799 +{
800 + struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
801 + struct mtd_info *mtd = nanddev_to_mtd(nand);
802 +
803 + if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ)
804 + return 0;
805 +
806 + if (snandc->qspi->ecc_stats.failed)
807 + mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed;
808 + else
809 + mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected;
810 +
811 + if (snandc->qspi->ecc_stats.failed)
812 + return -EBADMSG;
813 + else
814 + return snandc->qspi->ecc_stats.bitflips;
815 +}
816 +
817 +static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = {
818 + .init_ctx = qcom_spi_ecc_init_ctx_pipelined,
819 + .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined,
820 + .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined,
821 + .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined,
822 +};
823 +
824 +/* helper to configure location register values */
825 +static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg,
826 + int cw_offset, int read_size, int is_last_read_loc)
827 +{
828 + int reg_base = NAND_READ_LOCATION_0;
829 + int num_cw = snandc->qspi->num_cw;
830 +
831 + if (cw == (num_cw - 1))
832 + reg_base = NAND_READ_LOCATION_LAST_CW_0;
833 +
834 + reg_base += reg * 4;
835 +
836 + if (cw == (num_cw - 1))
837 + return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset,
838 + read_size, is_last_read_loc);
839 + else
840 + return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset,
841 + read_size, is_last_read_loc);
842 +}
843 +
844 +static void
845 +qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw)
846 +{
847 + __le32 *reg = &snandc->regs->read_location0;
848 + int num_cw = snandc->qspi->num_cw;
849 +
850 + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
851 + if (cw == (num_cw - 1)) {
852 + reg = &snandc->regs->read_location_last0;
853 + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4,
854 + NAND_BAM_NEXT_SGL);
855 + }
856 +
857 + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
858 + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
859 +
860 + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0);
861 + qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1,
862 + NAND_BAM_NEXT_SGL);
863 +}
864 +
865 +static int qcom_spi_block_erase(struct qcom_nand_controller *snandc)
866 +{
867 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
868 + int ret;
869 +
870 + snandc->buf_count = 0;
871 + snandc->buf_start = 0;
872 + qcom_clear_read_regs(snandc);
873 + qcom_clear_bam_transaction(snandc);
874 +
875 + snandc->regs->cmd = snandc->qspi->cmd;
876 + snandc->regs->addr0 = snandc->qspi->addr1;
877 + snandc->regs->addr1 = snandc->qspi->addr2;
878 + snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE));
879 + snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw);
880 + snandc->regs->exec = cpu_to_le32(1);
881 +
882 + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
883 + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
884 + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
885 +
886 + ret = qcom_submit_descs(snandc);
887 + if (ret) {
888 + dev_err(snandc->dev, "failure to erase block\n");
889 + return ret;
890 + }
891 +
892 + return 0;
893 +}
894 +
895 +static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc,
896 + bool use_ecc, int cw)
897 +{
898 + __le32 *reg = &snandc->regs->read_location0;
899 + int num_cw = snandc->qspi->num_cw;
900 +
901 + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
902 + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
903 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
904 + NAND_ERASED_CW_DETECT_CFG, 1, 0);
905 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
906 + NAND_ERASED_CW_DETECT_CFG, 1,
907 + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
908 +
909 + if (cw == (num_cw - 1)) {
910 + reg = &snandc->regs->read_location_last0;
911 + qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL);
912 + }
913 + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
914 + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
915 +
916 + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0);
917 +}
918 +
919 +static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc,
920 + const struct spi_mem_op *op)
921 +{
922 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
923 + struct mtd_info *mtd = snandc->qspi->mtd;
924 + int size, ret = 0;
925 + int col, bbpos;
926 + u32 cfg0, cfg1, ecc_bch_cfg;
927 + u32 num_cw = snandc->qspi->num_cw;
928 +
929 + qcom_clear_bam_transaction(snandc);
930 + qcom_clear_read_regs(snandc);
931 +
932 + size = ecc_cfg->cw_size;
933 + col = ecc_cfg->cw_size * (num_cw - 1);
934 +
935 + memset(snandc->data_buffer, 0xff, size);
936 + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
937 + snandc->regs->addr1 = snandc->qspi->addr2;
938 +
939 + cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
940 + 0 << CW_PER_PAGE;
941 + cfg1 = ecc_cfg->cfg1_raw;
942 + ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
943 +
944 + snandc->regs->cmd = snandc->qspi->cmd;
945 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
946 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
947 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
948 + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
949 + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
950 + snandc->regs->exec = cpu_to_le32(1);
951 +
952 + qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1);
953 +
954 + qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1);
955 +
956 + qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0);
957 +
958 + ret = qcom_submit_descs(snandc);
959 + if (ret) {
960 + dev_err(snandc->dev, "failed to read last cw\n");
961 + return ret;
962 + }
963 +
964 + qcom_nandc_dev_to_mem(snandc, true);
965 + u32 flash = le32_to_cpu(snandc->reg_read_buf[0]);
966 +
967 + if (flash & (FS_OP_ERR | FS_MPU_ERR))
968 + return -EIO;
969 +
970 + bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
971 +
972 + if (snandc->data_buffer[bbpos] == 0xff)
973 + snandc->data_buffer[bbpos + 1] = 0xff;
974 + if (snandc->data_buffer[bbpos] != 0xff)
975 + snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos];
976 +
977 + memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes);
978 +
979 + return ret;
980 +}
981 +
982 +static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
983 +{
984 + struct snandc_read_status *buf;
985 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
986 + int i, num_cw = snandc->qspi->num_cw;
987 + bool flash_op_err = false, erased;
988 + unsigned int max_bitflips = 0;
989 + unsigned int uncorrectable_cws = 0;
990 +
991 + snandc->qspi->ecc_stats.failed = 0;
992 + snandc->qspi->ecc_stats.corrected = 0;
993 +
994 + qcom_nandc_dev_to_mem(snandc, true);
995 + buf = (struct snandc_read_status *)snandc->reg_read_buf;
996 +
997 + for (i = 0; i < num_cw; i++, buf++) {
998 + u32 flash, buffer, erased_cw;
999 + int data_len, oob_len;
1000 +
1001 + if (i == (num_cw - 1)) {
1002 + data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
1003 + oob_len = num_cw << 2;
1004 + } else {
1005 + data_len = ecc_cfg->cw_data;
1006 + oob_len = 0;
1007 + }
1008 +
1009 + flash = le32_to_cpu(buf->snandc_flash);
1010 + buffer = le32_to_cpu(buf->snandc_buffer);
1011 + erased_cw = le32_to_cpu(buf->snandc_erased_cw);
1012 +
1013 + if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
1014 + if (ecc_cfg->bch_enabled)
1015 + erased = (erased_cw & ERASED_CW) == ERASED_CW;
1016 + else
1017 + erased = false;
1018 +
1019 + if (!erased)
1020 + uncorrectable_cws |= BIT(i);
1021 +
1022 + } else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
1023 + flash_op_err = true;
1024 + } else {
1025 + unsigned int stat;
1026 +
1027 + stat = buffer & BS_CORRECTABLE_ERR_MSK;
1028 + snandc->qspi->ecc_stats.corrected += stat;
1029 + max_bitflips = max(max_bitflips, stat);
1030 + }
1031 +
1032 + if (data_buf)
1033 + data_buf += data_len;
1034 + if (oob_buf)
1035 + oob_buf += oob_len + ecc_cfg->bytes;
1036 + }
1037 +
1038 + if (flash_op_err)
1039 + return -EIO;
1040 +
1041 + if (!uncorrectable_cws)
1042 + snandc->qspi->ecc_stats.bitflips = max_bitflips;
1043 + else
1044 + snandc->qspi->ecc_stats.failed++;
1045 +
1046 + return 0;
1047 +}
1048 +
1049 +static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
1050 +{
1051 + int i;
1052 +
1053 + qcom_nandc_dev_to_mem(snandc, true);
1054 +
1055 + for (i = 0; i < cw_cnt; i++) {
1056 + u32 flash = le32_to_cpu(snandc->reg_read_buf[i]);
1057 +
1058 + if (flash & (FS_OP_ERR | FS_MPU_ERR))
1059 + return -EIO;
1060 + }
1061 +
1062 + return 0;
1063 +}
1064 +
1065 +static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
1066 + u8 *oob_buf, int cw)
1067 +{
1068 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1069 + struct mtd_info *mtd = snandc->qspi->mtd;
1070 + int data_size1, data_size2, oob_size1, oob_size2;
1071 + int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
1072 + int raw_cw = cw;
1073 + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
1074 + int col;
1075 +
1076 + snandc->buf_count = 0;
1077 + snandc->buf_start = 0;
1078 + qcom_clear_read_regs(snandc);
1079 + qcom_clear_bam_transaction(snandc);
1080 + raw_cw = num_cw - 1;
1081 +
1082 + cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
1083 + 0 << CW_PER_PAGE;
1084 + cfg1 = ecc_cfg->cfg1_raw;
1085 + ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
1086 +
1087 + col = ecc_cfg->cw_size * cw;
1088 +
1089 + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
1090 + snandc->regs->addr1 = snandc->qspi->addr2;
1091 + snandc->regs->cmd = snandc->qspi->cmd;
1092 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
1093 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
1094 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
1095 + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
1096 + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
1097 + snandc->regs->exec = cpu_to_le32(1);
1098 +
1099 + qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);
1100 +
1101 + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
1102 + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
1103 + qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
1104 +
1105 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
1106 + NAND_ERASED_CW_DETECT_CFG, 1, 0);
1107 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
1108 + NAND_ERASED_CW_DETECT_CFG, 1,
1109 + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
1110 +
1111 + data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
1112 + oob_size1 = ecc_cfg->bbm_size;
1113 +
1114 + if (cw == (num_cw - 1)) {
1115 + data_size2 = NANDC_STEP_SIZE - data_size1 -
1116 + ((num_cw - 1) * 4);
1117 + oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
1118 + ecc_cfg->spare_bytes;
1119 + } else {
1120 + data_size2 = ecc_cfg->cw_data - data_size1;
1121 + oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
1122 + }
1123 +
1124 + qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
1125 + read_loc += data_size1;
1126 +
1127 + qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
1128 + read_loc += oob_size1;
1129 +
1130 + qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
1131 + read_loc += data_size2;
1132 +
1133 + qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);
1134 +
1135 + qcom_spi_config_cw_read(snandc, false, raw_cw);
1136 +
1137 + qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
1138 + reg_off += data_size1;
1139 +
1140 + qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
1141 + reg_off += oob_size1;
1142 +
1143 + qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
1144 + reg_off += data_size2;
1145 +
1146 + qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
1147 +
1148 + ret = qcom_submit_descs(snandc);
1149 + if (ret) {
1150 + dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
1151 + return ret;
1152 + }
1153 +
1154 + return qcom_spi_check_raw_flash_errors(snandc, 1);
1155 +}
1156 +
1157 +static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
1158 + const struct spi_mem_op *op)
1159 +{
1160 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1161 + u8 *data_buf = NULL, *oob_buf = NULL;
1162 + int ret, cw;
1163 + u32 num_cw = snandc->qspi->num_cw;
1164 +
1165 + if (snandc->qspi->page_rw)
1166 + data_buf = op->data.buf.in;
1167 +
1168 + oob_buf = snandc->qspi->oob_buf;
1169 + memset(oob_buf, 0xff, OOB_BUF_SIZE);
1170 +
1171 + for (cw = 0; cw < num_cw; cw++) {
1172 + ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
1173 + if (ret)
1174 + return ret;
1175 +
1176 + if (data_buf)
1177 + data_buf += ecc_cfg->cw_data;
1178 + if (oob_buf)
1179 + oob_buf += ecc_cfg->bytes;
1180 + }
1181 +
1182 + return 0;
1183 +}
1184 +
1185 +static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc,
1186 + const struct spi_mem_op *op)
1187 +{
1188 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1189 + u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
1190 + int ret, i;
1191 + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
1192 +
1193 + data_buf = op->data.buf.in;
1194 + data_buf_start = data_buf;
1195 +
1196 + oob_buf = snandc->qspi->oob_buf;
1197 + oob_buf_start = oob_buf;
1198 +
1199 + snandc->buf_count = 0;
1200 + snandc->buf_start = 0;
1201 + qcom_clear_read_regs(snandc);
1202 +
1203 + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
1204 + (num_cw - 1) << CW_PER_PAGE;
1205 + cfg1 = ecc_cfg->cfg1;
1206 + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
1207 +
1208 + snandc->regs->addr0 = snandc->qspi->addr1;
1209 + snandc->regs->addr1 = snandc->qspi->addr2;
1210 + snandc->regs->cmd = snandc->qspi->cmd;
1211 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
1212 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
1213 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
1214 + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
1215 + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
1216 + snandc->regs->exec = cpu_to_le32(1);
1217 +
1218 + qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
1219 +
1220 + qcom_clear_bam_transaction(snandc);
1221 +
1222 + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
1223 + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
1224 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
1225 + NAND_ERASED_CW_DETECT_CFG, 1, 0);
1226 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
1227 + NAND_ERASED_CW_DETECT_CFG, 1,
1228 + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
1229 +
1230 + for (i = 0; i < num_cw; i++) {
1231 + int data_size, oob_size;
1232 +
1233 + if (i == (num_cw - 1)) {
1234 + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
1235 + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
1236 + ecc_cfg->spare_bytes;
1237 + } else {
1238 + data_size = ecc_cfg->cw_data;
1239 + oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
1240 + }
1241 +
1242 + if (data_buf && oob_buf) {
1243 + qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0);
1244 + qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1);
1245 + } else if (data_buf) {
1246 + qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1);
1247 + } else {
1248 + qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
1249 + }
1250 +
1251 + qcom_spi_config_cw_read(snandc, true, i);
1252 +
1253 + if (data_buf)
1254 + qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf,
1255 + data_size, 0);
1256 + if (oob_buf) {
1257 + int j;
1258 +
1259 + for (j = 0; j < ecc_cfg->bbm_size; j++)
1260 + *oob_buf++ = 0xff;
1261 +
1262 + qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
1263 + oob_buf, oob_size, 0);
1264 + }
1265 +
1266 + if (data_buf)
1267 + data_buf += data_size;
1268 + if (oob_buf)
1269 + oob_buf += oob_size;
1270 + }
1271 +
1272 + ret = qcom_submit_descs(snandc);
1273 + if (ret) {
1274 + dev_err(snandc->dev, "failure to read page\n");
1275 + return ret;
1276 + }
1277 +
1278 + return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
1279 +}
1280 +
1281 +static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc,
1282 + const struct spi_mem_op *op)
1283 +{
1284 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1285 + u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start;
1286 + int ret, i;
1287 + u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
1288 +
1289 + oob_buf = op->data.buf.in;
1290 + oob_buf_start = oob_buf;
1291 +
1292 + data_buf_start = data_buf;
1293 +
1294 + snandc->buf_count = 0;
1295 + snandc->buf_start = 0;
1296 + qcom_clear_read_regs(snandc);
1297 + qcom_clear_bam_transaction(snandc);
1298 +
1299 + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
1300 + (num_cw - 1) << CW_PER_PAGE;
1301 + cfg1 = ecc_cfg->cfg1;
1302 + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
1303 +
1304 + snandc->regs->addr0 = snandc->qspi->addr1;
1305 + snandc->regs->addr1 = snandc->qspi->addr2;
1306 + snandc->regs->cmd = snandc->qspi->cmd;
1307 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
1308 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
1309 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
1310 + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
1311 + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
1312 + snandc->regs->exec = cpu_to_le32(1);
1313 +
1314 + qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1);
1315 +
1316 + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
1317 + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
1318 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
1319 + NAND_ERASED_CW_DETECT_CFG, 1, 0);
1320 + qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
1321 + NAND_ERASED_CW_DETECT_CFG, 1,
1322 + NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
1323 +
1324 + for (i = 0; i < num_cw; i++) {
1325 + int data_size, oob_size;
1326 +
1327 + if (i == (num_cw - 1)) {
1328 + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
1329 + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
1330 + ecc_cfg->spare_bytes;
1331 + } else {
1332 + data_size = ecc_cfg->cw_data;
1333 + oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
1334 + }
1335 +
1336 + qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1);
1337 +
1338 + qcom_spi_config_cw_read(snandc, true, i);
1339 +
1340 + if (oob_buf) {
1341 + int j;
1342 +
1343 + for (j = 0; j < ecc_cfg->bbm_size; j++)
1344 + *oob_buf++ = 0xff;
1345 +
1346 + qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size,
1347 + oob_buf, oob_size, 0);
1348 + }
1349 +
1350 + if (oob_buf)
1351 + oob_buf += oob_size;
1352 + }
1353 +
1354 + ret = qcom_submit_descs(snandc);
1355 + if (ret) {
1356 + dev_err(snandc->dev, "failure to read oob\n");
1357 + return ret;
1358 + }
1359 +
1360 + return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start);
1361 +}
1362 +
1363 +static int qcom_spi_read_page(struct qcom_nand_controller *snandc,
1364 + const struct spi_mem_op *op)
1365 +{
1366 + if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
1367 + return qcom_spi_read_page_raw(snandc, op);
1368 +
1369 + if (snandc->qspi->page_rw)
1370 + return qcom_spi_read_page_ecc(snandc, op);
1371 +
1372 + if (snandc->qspi->oob_rw && snandc->qspi->raw_rw)
1373 + return qcom_spi_read_last_cw(snandc, op);
1374 +
1375 + if (snandc->qspi->oob_rw)
1376 + return qcom_spi_read_page_oob(snandc, op);
1377 +
1378 + return 0;
1379 +}
1380 +
1381 +static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc)
1382 +{
1383 + qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
1384 + qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
1385 + qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG,
1386 + 1, NAND_BAM_NEXT_SGL);
1387 +}
1388 +
1389 +static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc)
1390 +{
1391 + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
1392 + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1393 + qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
1394 +
1395 + qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
1396 + qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
1397 + NAND_BAM_NEXT_SGL);
1398 +}
1399 +
1400 +static int qcom_spi_program_raw(struct qcom_nand_controller *snandc,
1401 + const struct spi_mem_op *op)
1402 +{
1403 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1404 + struct mtd_info *mtd = snandc->qspi->mtd;
1405 + u8 *data_buf = NULL, *oob_buf = NULL;
1406 + int i, ret;
1407 + int num_cw = snandc->qspi->num_cw;
1408 + u32 cfg0, cfg1, ecc_bch_cfg;
1409 +
1410 + cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
1411 + (num_cw - 1) << CW_PER_PAGE;
1412 + cfg1 = ecc_cfg->cfg1_raw;
1413 + ecc_bch_cfg = ECC_CFG_ECC_DISABLE;
1414 +
1415 + data_buf = snandc->qspi->data_buf;
1416 +
1417 + oob_buf = snandc->qspi->oob_buf;
1418 + memset(oob_buf, 0xff, OOB_BUF_SIZE);
1419 +
1420 + snandc->buf_count = 0;
1421 + snandc->buf_start = 0;
1422 + qcom_clear_read_regs(snandc);
1423 + qcom_clear_bam_transaction(snandc);
1424 +
1425 + snandc->regs->addr0 = snandc->qspi->addr1;
1426 + snandc->regs->addr1 = snandc->qspi->addr2;
1427 + snandc->regs->cmd = snandc->qspi->cmd;
1428 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
1429 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
1430 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
1431 + snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
1432 + snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
1433 + snandc->regs->exec = cpu_to_le32(1);
1434 +
1435 + qcom_spi_config_page_write(snandc);
1436 +
1437 + for (i = 0; i < num_cw; i++) {
1438 + int data_size1, data_size2, oob_size1, oob_size2;
1439 + int reg_off = FLASH_BUF_ACC;
1440 +
1441 + data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
1442 + oob_size1 = ecc_cfg->bbm_size;
1443 +
1444 + if (i == (num_cw - 1)) {
1445 + data_size2 = NANDC_STEP_SIZE - data_size1 -
1446 + ((num_cw - 1) << 2);
1447 + oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
1448 + ecc_cfg->spare_bytes;
1449 + } else {
1450 + data_size2 = ecc_cfg->cw_data - data_size1;
1451 + oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
1452 + }
1453 +
1454 + qcom_write_data_dma(snandc, reg_off, data_buf, data_size1,
1455 + NAND_BAM_NO_EOT);
1456 + reg_off += data_size1;
1457 + data_buf += data_size1;
1458 +
1459 + qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1,
1460 + NAND_BAM_NO_EOT);
1461 + oob_buf += oob_size1;
1462 + reg_off += oob_size1;
1463 +
1464 + qcom_write_data_dma(snandc, reg_off, data_buf, data_size2,
1465 + NAND_BAM_NO_EOT);
1466 + reg_off += data_size2;
1467 + data_buf += data_size2;
1468 +
1469 + qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0);
1470 + oob_buf += oob_size2;
1471 +
1472 + qcom_spi_config_cw_write(snandc);
1473 + }
1474 +
1475 + ret = qcom_submit_descs(snandc);
1476 + if (ret) {
1477 + dev_err(snandc->dev, "failure to write raw page\n");
1478 + return ret;
1479 + }
1480 +
1481 + return 0;
1482 +}
1483 +
1484 +static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc,
1485 + const struct spi_mem_op *op)
1486 +{
1487 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1488 + u8 *data_buf = NULL, *oob_buf = NULL;
1489 + int i, ret;
1490 + int num_cw = snandc->qspi->num_cw;
1491 + u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
1492 +
1493 + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
1494 + (num_cw - 1) << CW_PER_PAGE;
1495 + cfg1 = ecc_cfg->cfg1;
1496 + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
1497 + ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
1498 +
1499 + if (snandc->qspi->data_buf)
1500 + data_buf = snandc->qspi->data_buf;
1501 +
1502 + oob_buf = snandc->qspi->oob_buf;
1503 +
1504 + snandc->buf_count = 0;
1505 + snandc->buf_start = 0;
1506 + qcom_clear_read_regs(snandc);
1507 + qcom_clear_bam_transaction(snandc);
1508 +
1509 + snandc->regs->addr0 = snandc->qspi->addr1;
1510 + snandc->regs->addr1 = snandc->qspi->addr2;
1511 + snandc->regs->cmd = snandc->qspi->cmd;
1512 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
1513 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
1514 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
1515 + snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
1516 + snandc->regs->exec = cpu_to_le32(1);
1517 +
1518 + qcom_spi_config_page_write(snandc);
1519 +
1520 + for (i = 0; i < num_cw; i++) {
1521 + int data_size, oob_size;
1522 +
1523 + if (i == (num_cw - 1)) {
1524 + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
1525 + oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw +
1526 + ecc_cfg->spare_bytes;
1527 + } else {
1528 + data_size = ecc_cfg->cw_data;
1529 + oob_size = ecc_cfg->bytes;
1530 + }
1531 +
1532 + if (data_buf)
1533 + qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size,
1534 + i == (num_cw - 1) ? NAND_BAM_NO_EOT : 0);
1535 +
1536 + if (i == (num_cw - 1)) {
1537 + if (oob_buf) {
1538 + oob_buf += ecc_cfg->bbm_size;
1539 + qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size,
1540 + oob_buf, oob_size, 0);
1541 + }
1542 + }
1543 +
1544 + qcom_spi_config_cw_write(snandc);
1545 +
1546 + if (data_buf)
1547 + data_buf += data_size;
1548 + if (oob_buf)
1549 + oob_buf += oob_size;
1550 + }
1551 +
1552 + ret = qcom_submit_descs(snandc);
1553 + if (ret) {
1554 + dev_err(snandc->dev, "failure to write page\n");
1555 + return ret;
1556 + }
1557 +
1558 + return 0;
1559 +}
1560 +
1561 +static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
1562 + const struct spi_mem_op *op)
1563 +{
1564 + struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
1565 + u8 *oob_buf = NULL;
1566 + int ret, col, data_size, oob_size;
1567 + int num_cw = snandc->qspi->num_cw;
1568 + u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
1569 +
1570 + cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) |
1571 + (num_cw - 1) << CW_PER_PAGE;
1572 + cfg1 = ecc_cfg->cfg1;
1573 + ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
1574 + ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
1575 +
1576 + col = ecc_cfg->cw_size * (num_cw - 1);
1577 +
1578 + oob_buf = snandc->qspi->data_buf;
1579 +
1580 + snandc->buf_count = 0;
1581 + snandc->buf_start = 0;
1582 + qcom_clear_read_regs(snandc);
1583 + qcom_clear_bam_transaction(snandc);
1584 + snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
1585 + snandc->regs->addr1 = snandc->qspi->addr2;
1586 + snandc->regs->cmd = snandc->qspi->cmd;
1587 + snandc->regs->cfg0 = cpu_to_le32(cfg0);
1588 + snandc->regs->cfg1 = cpu_to_le32(cfg1);
1589 + snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
1590 + snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg);
1591 + snandc->regs->exec = cpu_to_le32(1);
1592 +
1593 + /* calculate the data and oob size for the last codeword/step */
1594 + data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
1595 + oob_size = snandc->qspi->mtd->oobavail;
1596 +
1597 + memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data);
1598 + /* override new oob content to last codeword */
1599 + mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size,
1600 + oob_buf, 0, snandc->qspi->mtd->oobavail);
1601 + qcom_spi_config_page_write(snandc);
1602 + qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0);
1603 + qcom_spi_config_cw_write(snandc);
1604 +
1605 + ret = qcom_submit_descs(snandc);
1606 + if (ret) {
1607 + dev_err(snandc->dev, "failed to write oob\n");
1608 + return ret;
1609 + }
1610 +
1611 + return 0;
1612 +}
1613 +
1614 +static int qcom_spi_program_execute(struct qcom_nand_controller *snandc,
1615 + const struct spi_mem_op *op)
1616 +{
1617 + if (snandc->qspi->page_rw && snandc->qspi->raw_rw)
1618 + return qcom_spi_program_raw(snandc, op);
1619 +
1620 + if (snandc->qspi->page_rw)
1621 + return qcom_spi_program_ecc(snandc, op);
1622 +
1623 + if (snandc->qspi->oob_rw)
1624 + return qcom_spi_program_oob(snandc, op);
1625 +
1626 + return 0;
1627 +}
1628 +
1629 +static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode)
1630 +{
1631 + int cmd = 0x0;
1632 +
1633 + switch (opcode) {
1634 + case SPINAND_RESET:
1635 + cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE);
1636 + break;
1637 + case SPINAND_READID:
1638 + cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID);
1639 + break;
1640 + case SPINAND_GET_FEATURE:
1641 + cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE);
1642 + break;
1643 + case SPINAND_SET_FEATURE:
1644 + cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE |
1645 + QPIC_SET_FEATURE);
1646 + break;
1647 + case SPINAND_READ:
1648 + if (snandc->qspi->raw_rw) {
1649 + cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
1650 + SPI_WP | SPI_HOLD | OP_PAGE_READ);
1651 + } else {
1652 + cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
1653 + SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC);
1654 + }
1655 +
1656 + break;
1657 + case SPINAND_ERASE:
1658 + cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP |
1659 + SPI_HOLD | SPI_TRANSFER_MODE_x1;
1660 + break;
1661 + case SPINAND_WRITE_EN:
1662 + cmd = SPINAND_WRITE_EN;
1663 + break;
1664 + case SPINAND_PROGRAM_EXECUTE:
1665 + cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 |
1666 + SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE);
1667 + break;
1668 + case SPINAND_PROGRAM_LOAD:
1669 + cmd = SPINAND_PROGRAM_LOAD;
1670 + break;
1671 + default:
1672 + dev_err(snandc->dev, "Opcode not supported: %u\n", opcode);
1673 + return -EOPNOTSUPP;
1674 + }
1675 +
1676 + return cmd;
1677 +}
1678 +
1679 +static int qcom_spi_write_page(struct qcom_nand_controller *snandc,
1680 + const struct spi_mem_op *op)
1681 +{
1682 + int cmd;
1683 +
1684 + cmd = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
1685 + if (cmd < 0)
1686 + return cmd;
1687 +
1688 + if (op->cmd.opcode == SPINAND_PROGRAM_LOAD)
1689 + snandc->qspi->data_buf = (u8 *)op->data.buf.out;
1690 +
1691 + return 0;
1692 +}
1693 +
1694 +static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc,
1695 + const struct spi_mem_op *op)
1696 +{
1697 + struct qpic_snand_op s_op = {};
1698 + u32 cmd;
1699 + int ret, opcode;
1700 +
1701 + ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode);
1702 + if (ret < 0)
1703 + return ret;
1704 +
1705 + cmd = ret;
1706 +
1707 + s_op.cmd_reg = cmd;
1708 + s_op.addr1_reg = op->addr.val;
1709 + s_op.addr2_reg = 0;
1710 +
1711 + opcode = op->cmd.opcode;
1712 +
1713 + switch (opcode) {
1714 + case SPINAND_WRITE_EN:
1715 + return 0;
1716 + case SPINAND_PROGRAM_EXECUTE:
1717 + s_op.addr1_reg = op->addr.val << 16;
1718 + s_op.addr2_reg = op->addr.val >> 16 & 0xff;
1719 + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
1720 + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
1721 + snandc->qspi->cmd = cpu_to_le32(cmd);
1722 + return qcom_spi_program_execute(snandc, op);
1723 + case SPINAND_READ:
1724 + s_op.addr1_reg = (op->addr.val << 16);
1725 + s_op.addr2_reg = op->addr.val >> 16 & 0xff;
1726 + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg);
1727 + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
1728 + snandc->qspi->cmd = cpu_to_le32(cmd);
1729 + return 0;
1730 + case SPINAND_ERASE:
1731 + s_op.addr2_reg = (op->addr.val >> 16) & 0xffff;
1732 + s_op.addr1_reg = op->addr.val;
1733 + snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg << 16);
1734 + snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg);
1735 + snandc->qspi->cmd = cpu_to_le32(cmd);
1736 + qcom_spi_block_erase(snandc);
1737 + return 0;
1738 + default:
1739 + break;
1740 + }
1741 +
1742 + snandc->buf_count = 0;
1743 + snandc->buf_start = 0;
1744 + qcom_clear_read_regs(snandc);
1745 + qcom_clear_bam_transaction(snandc);
1746 +
1747 + snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg);
1748 + snandc->regs->exec = cpu_to_le32(1);
1749 + snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg);
1750 + snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg);
1751 +
1752 + qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
1753 + qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
1754 +
1755 + ret = qcom_submit_descs(snandc);
1756 + if (ret)
1757 + dev_err(snandc->dev, "failed to submit cmd descriptor\n");
1758 +
1759 + return ret;
1760 +}
1761 +
1762 +static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op)
1763 +{
1764 + int ret, val, opcode;
1765 + bool copy = false, copy_ftr = false;
1766 +
1767 + ret = qcom_spi_send_cmdaddr(snandc, op);
1768 + if (ret)
1769 + return ret;
1770 +
1771 + snandc->buf_count = 0;
1772 + snandc->buf_start = 0;
1773 + qcom_clear_read_regs(snandc);
1774 + qcom_clear_bam_transaction(snandc);
1775 + opcode = op->cmd.opcode;
1776 +
1777 + switch (opcode) {
1778 + case SPINAND_READID:
1779 + snandc->buf_count = 4;
1780 + qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
1781 + copy = true;
1782 + break;
1783 + case SPINAND_GET_FEATURE:
1784 + snandc->buf_count = 4;
1785 + qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
1786 + copy_ftr = true;
1787 + break;
1788 + case SPINAND_SET_FEATURE:
1789 + snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out);
1790 + qcom_write_reg_dma(snandc, &snandc->regs->flash_feature,
1791 + NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL);
1792 + break;
1793 + case SPINAND_PROGRAM_EXECUTE:
1794 + case SPINAND_WRITE_EN:
1795 + case SPINAND_RESET:
1796 + case SPINAND_ERASE:
1797 + case SPINAND_READ:
1798 + return 0;
1799 + default:
1800 + return -EOPNOTSUPP;
1801 + }
1802 +
1803 + ret = qcom_submit_descs(snandc);
1804 + if (ret)
1805 + dev_err(snandc->dev, "failed to submit descriptor for opcode %d\n", opcode);
1806 +
1807 + if (copy) {
1808 + qcom_nandc_dev_to_mem(snandc, true);
1809 + memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count);
1810 + }
1811 +
1812 + if (copy_ftr) {
1813 + qcom_nandc_dev_to_mem(snandc, true);
1814 + val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf);
1815 + val >>= 8;
1816 + memcpy(op->data.buf.in, &val, snandc->buf_count);
1817 + }
1818 +
1819 + return ret;
1820 +}
1821 +
1822 +static bool qcom_spi_is_page_op(const struct spi_mem_op *op)
1823 +{
1824 + if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4)
1825 + return false;
1826 +
1827 + if (op->data.dir == SPI_MEM_DATA_IN) {
1828 + if (op->addr.buswidth == 4 && op->data.buswidth == 4)
1829 + return true;
1830 +
1831 + if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
1832 + return true;
1833 +
1834 + } else if (op->data.dir == SPI_MEM_DATA_OUT) {
1835 + if (op->data.buswidth == 4)
1836 + return true;
1837 + if (op->addr.nbytes == 2 && op->addr.buswidth == 1)
1838 + return true;
1839 + }
1840 +
1841 + return false;
1842 +}
1843 +
1844 +static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
1845 +{
1846 + if (!spi_mem_default_supports_op(mem, op))
1847 + return false;
1848 +
1849 + if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1)
1850 + return false;
1851 +
1852 + if (qcom_spi_is_page_op(op))
1853 + return true;
1854 +
1855 + return ((!op->addr.nbytes || op->addr.buswidth == 1) &&
1856 + (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
1857 + (!op->data.nbytes || op->data.buswidth == 1));
1858 +}
1859 +
1860 +static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
1861 +{
1862 + struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller);
1863 +
1864 + dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u\n", op->cmd.opcode,
1865 + op->addr.val, op->addr.buswidth, op->addr.nbytes,
1866 + op->data.buswidth, op->data.nbytes);
1867 +
1868 + if (qcom_spi_is_page_op(op)) {
1869 + if (op->data.dir == SPI_MEM_DATA_IN)
1870 + return qcom_spi_read_page(snandc, op);
1871 + if (op->data.dir == SPI_MEM_DATA_OUT)
1872 + return qcom_spi_write_page(snandc, op);
1873 + } else {
1874 + return qcom_spi_io_op(snandc, op);
1875 + }
1876 +
1877 + return 0;
1878 +}
1879 +
1880 +static const struct spi_controller_mem_ops qcom_spi_mem_ops = {
1881 + .supports_op = qcom_spi_supports_op,
1882 + .exec_op = qcom_spi_exec_op,
1883 +};
1884 +
1885 +static const struct spi_controller_mem_caps qcom_spi_mem_caps = {
1886 + .ecc = true,
1887 +};
1888 +
1889 +static int qcom_spi_probe(struct platform_device *pdev)
1890 +{
1891 + struct device *dev = &pdev->dev;
1892 + struct spi_controller *ctlr;
1893 + struct qcom_nand_controller *snandc;
1894 + struct qpic_spi_nand *qspi;
1895 + struct qpic_ecc *ecc;
1896 + struct resource *res;
1897 + const void *dev_data;
1898 + int ret;
1899 +
1900 + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
1901 + if (!ecc)
1902 + return -ENOMEM;
1903 +
1904 + qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
1905 + if (!qspi)
1906 + return -ENOMEM;
1907 +
1908 + ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false);
1909 + if (!ctlr)
1910 + return -ENOMEM;
1911 +
1912 + platform_set_drvdata(pdev, ctlr);
1913 +
1914 + snandc = spi_controller_get_devdata(ctlr);
1915 + qspi->snandc = snandc;
1916 +
1917 + snandc->dev = dev;
1918 + snandc->qspi = qspi;
1919 + snandc->qspi->ctlr = ctlr;
1920 + snandc->qspi->ecc = ecc;
1921 +
1922 + dev_data = of_device_get_match_data(dev);
1923 + if (!dev_data) {
1924 + dev_err(&pdev->dev, "failed to get device data\n");
1925 + return -ENODEV;
1926 + }
1927 +
1928 + snandc->props = dev_data;
1929 + snandc->dev = &pdev->dev;
1930 +
1931 + snandc->core_clk = devm_clk_get(dev, "core");
1932 + if (IS_ERR(snandc->core_clk))
1933 + return PTR_ERR(snandc->core_clk);
1934 +
1935 + snandc->aon_clk = devm_clk_get(dev, "aon");
1936 + if (IS_ERR(snandc->aon_clk))
1937 + return PTR_ERR(snandc->aon_clk);
1938 +
1939 + snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom");
1940 + if (IS_ERR(snandc->qspi->iomacro_clk))
1941 + return PTR_ERR(snandc->qspi->iomacro_clk);
1942 +
1943 + snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
1944 + if (IS_ERR(snandc->base))
1945 + return PTR_ERR(snandc->base);
1946 +
1947 + snandc->base_phys = res->start;
1948 + snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res),
1949 + DMA_BIDIRECTIONAL, 0);
1950 + if (dma_mapping_error(dev, snandc->base_dma))
1951 + return -ENXIO;
1952 +
1953 + ret = clk_prepare_enable(snandc->core_clk);
1954 + if (ret)
1955 + goto err_dis_core_clk;
1956 +
1957 + ret = clk_prepare_enable(snandc->aon_clk);
1958 + if (ret)
1959 + goto err_dis_aon_clk;
1960 +
1961 + ret = clk_prepare_enable(snandc->qspi->iomacro_clk);
1962 + if (ret)
1963 + goto err_dis_iom_clk;
1964 +
1965 + ret = qcom_nandc_alloc(snandc);
1966 + if (ret)
1967 + goto err_snand_alloc;
1968 +
1969 + ret = qcom_spi_init(snandc);
1970 + if (ret)
1971 + goto err_spi_init;
1972 +
1973 + /* setup ECC engine */
1974 + snandc->qspi->ecc_eng.dev = &pdev->dev;
1975 + snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
1976 + snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined;
1977 + snandc->qspi->ecc_eng.priv = snandc;
1978 +
1979 + ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng);
1980 + if (ret) {
1981 + dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret);
1982 + goto err_spi_init;
1983 + }
1984 +
1985 + ctlr->num_chipselect = QPIC_QSPI_NUM_CS;
1986 + ctlr->mem_ops = &qcom_spi_mem_ops;
1987 + ctlr->mem_caps = &qcom_spi_mem_caps;
1988 + ctlr->dev.of_node = pdev->dev.of_node;
1989 + ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL |
1990 + SPI_TX_QUAD | SPI_RX_QUAD;
1991 +
1992 + ret = spi_register_controller(ctlr);
1993 + if (ret) {
1994 + dev_err(&pdev->dev, "spi_register_controller failed.\n");
1995 + goto err_spi_init;
1996 + }
1997 +
1998 + return 0;
1999 +
2000 +err_spi_init:
2001 + qcom_nandc_unalloc(snandc);
2002 +err_snand_alloc:
2003 + clk_disable_unprepare(snandc->qspi->iomacro_clk);
2004 +err_dis_iom_clk:
2005 + clk_disable_unprepare(snandc->aon_clk);
2006 +err_dis_aon_clk:
2007 + clk_disable_unprepare(snandc->core_clk);
2008 +err_dis_core_clk:
2009 + dma_unmap_resource(dev, res->start, resource_size(res),
2010 + DMA_BIDIRECTIONAL, 0);
2011 + return ret;
2012 +}
2013 +
2014 +static void qcom_spi_remove(struct platform_device *pdev)
2015 +{
2016 + struct spi_controller *ctlr = platform_get_drvdata(pdev);
2017 + struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr);
2018 + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2019 +
2020 + spi_unregister_controller(ctlr);
2021 +
2022 + qcom_nandc_unalloc(snandc);
2023 +
2024 + clk_disable_unprepare(snandc->aon_clk);
2025 + clk_disable_unprepare(snandc->core_clk);
2026 + clk_disable_unprepare(snandc->qspi->iomacro_clk);
2027 +
2028 + dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res),
2029 + DMA_BIDIRECTIONAL, 0);
2030 +}
2031 +
2032 +static const struct qcom_nandc_props ipq9574_snandc_props = {
2033 + .dev_cmd_reg_start = 0x7000,
2034 + .supports_bam = true,
2035 +};
2036 +
2037 +static const struct of_device_id qcom_snandc_of_match[] = {
2038 + {
2039 + .compatible = "qcom,ipq9574-snand",
2040 + .data = &ipq9574_snandc_props,
2041 + },
2042 + {}
2043 +};
2044 +MODULE_DEVICE_TABLE(of, qcom_snandc_of_match);
2045 +
2046 +static struct platform_driver qcom_spi_driver = {
2047 + .driver = {
2048 + .name = "qcom_snand",
2049 + .of_match_table = qcom_snandc_of_match,
2050 + },
2051 + .probe = qcom_spi_probe,
2052 + .remove_new = qcom_spi_remove,
2053 +};
2054 +module_platform_driver(qcom_spi_driver);
2055 +
2056 +MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores");
2057 +MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>");
2058 +MODULE_LICENSE("GPL");
2059 +
2060 --- a/include/linux/mtd/nand-qpic-common.h
2061 +++ b/include/linux/mtd/nand-qpic-common.h
2062 @@ -325,6 +325,10 @@ struct nandc_regs {
2063 __le32 read_location_last1;
2064 __le32 read_location_last2;
2065 __le32 read_location_last3;
2066 + __le32 spi_cfg;
2067 + __le32 num_addr_cycle;
2068 + __le32 busy_wait_cnt;
2069 + __le32 flash_feature;
2070
2071 __le32 erased_cw_detect_cfg_clr;
2072 __le32 erased_cw_detect_cfg_set;
2073 @@ -339,6 +343,7 @@ struct nandc_regs {
2074 *
2075 * @core_clk: controller clock
2076 * @aon_clk: another controller clock
2077 + * @iomacro_clk: io macro clock
2078 *
2079 * @regs: a contiguous chunk of memory for DMA register
2080 * writes. contains the register values to be
2081 @@ -348,6 +353,7 @@ struct nandc_regs {
2082 * initialized via DT match data
2083 *
2084 * @controller: base controller structure
2085 + * @qspi: qpic spi structure
2086 * @host_list: list containing all the chips attached to the
2087 * controller
2088 *
2089 @@ -392,6 +398,7 @@ struct qcom_nand_controller {
2090 const struct qcom_nandc_props *props;
2091
2092 struct nand_controller *controller;
2093 + struct qpic_spi_nand *qspi;
2094 struct list_head host_list;
2095
2096 union {