// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>

/*
 * HPNFC can work in 3 modes:
 * -  PIO - works with either master or slave DMA.
 * -  CDMA - needs master DMA for accessing command descriptors.
 * -  Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on NAND flash memory. The driver uses CDMA mode for
 * block erasing, page reading and page programming.
 * Generic mode is used for executing the rest of the commands.
 */

#define MAX_ADDRESS_CYC         6
#define MAX_ERASE_ADDRESS_CYC   3
#define MAX_DATA_SIZE           0xFFFC
#define DMA_DATA_SIZE_ALIGN     8

/* Register definition. */
/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0                        0x0000
/* Command type field mask. */
#define         CMD_REG0_CT             GENMASK(31, 30)
/* Command type CDMA. */
#define         CMD_REG0_CT_CDMA        0uL
/* Command type generic. */
#define         CMD_REG0_CT_GEN         3uL
/* Command thread number field mask. */
#define         CMD_REG0_TN             GENMASK(27, 24)

/* Command register 2. */
#define CMD_REG2                        0x0008
/* Command register 3. */
#define CMD_REG3                        0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR                  0x0010
/* Command status register for selected thread. */
#define CMD_STATUS                      0x0014

/* Interrupt status register. */
#define INTR_STATUS                     0x0110
#define         INTR_STATUS_SDMA_ERR    BIT(22)
#define         INTR_STATUS_SDMA_TRIGG  BIT(21)
#define         INTR_STATUS_UNSUPP_CMD  BIT(19)
#define         INTR_STATUS_DDMA_TERR   BIT(18)
#define         INTR_STATUS_CDMA_TERR   BIT(17)
#define         INTR_STATUS_CDMA_IDL    BIT(16)

/* Interrupt enable register. */
#define INTR_ENABLE                             0x0114
#define         INTR_ENABLE_INTR_EN             BIT(31)
#define         INTR_ENABLE_SDMA_ERR_EN         BIT(22)
#define         INTR_ENABLE_SDMA_TRIGG_EN       BIT(21)
#define         INTR_ENABLE_UNSUPP_CMD_EN       BIT(19)
#define         INTR_ENABLE_DDMA_TERR_EN        BIT(18)
#define         INTR_ENABLE_CDMA_TERR_EN        BIT(17)
#define         INTR_ENABLE_CDMA_IDLE_EN        BIT(16)

/* Controller internal state. */
#define CTRL_STATUS                             0x0118
#define         CTRL_STATUS_INIT_COMP           BIT(9)
#define         CTRL_STATUS_CTRL_BUSY           BIT(8)

/* Command Engine threads state. */
#define TRD_STATUS                              0x0120

/* Command Engine interrupt thread error status. */
#define TRD_ERR_INT_STATUS                      0x0128
/* Command Engine interrupt thread error enable. */
#define TRD_ERR_INT_STATUS_EN                   0x0130
/* Command Engine interrupt thread complete status. */
#define TRD_COMP_INT_STATUS                     0x0138

/*
 * Transfer config 0 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_0                              0x0400
/* Offset value from the beginning of the page. */
#define         TRAN_CFG_0_OFFSET               GENMASK(31, 16)
/* Number of sectors to transfer within a single NF device's page. */
#define         TRAN_CFG_0_SEC_CNT              GENMASK(7, 0)

/*
 * Transfer config 1 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_1                              0x0404
/* Size of last data sector. */
#define         TRAN_CFG_1_LAST_SEC_SIZE        GENMASK(31, 16)
/* Size of a non-last data sector. */
#define         TRAN_CFG_1_SECTOR_SIZE          GENMASK(15, 0)

/* ECC engine configuration register 0. */
#define ECC_CONFIG_0                            0x0428
/* Correction strength. */
#define         ECC_CONFIG_0_CORR_STR           GENMASK(10, 8)
/* Enable erased pages detection mechanism. */
#define         ECC_CONFIG_0_ERASE_DET_EN       BIT(1)
/* Enable controller ECC check bits generation and correction. */
#define         ECC_CONFIG_0_ECC_EN             BIT(0)

/* ECC engine configuration register 1. */
#define ECC_CONFIG_1                            0x042C

/* Multiplane settings register. */
#define MULTIPLANE_CFG                          0x0434
/* Cache operation settings. */
#define CACHE_CFG                               0x0438

/* DMA settings register. */
#define DMA_SETINGS                             0x043C
/* Enable SDMA error report on access to an unprepared slave DMA interface. */
#define         DMA_SETINGS_SDMA_ERR_RSP        BIT(17)

/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE                               0x0440

/*
 * Thread number associated with transferred data block
 * for the slave DMA module.
 */
#define SDMA_TRD_NUM                            0x0444
/* Thread number mask. */
#define         SDMA_TRD_NUM_SDMA_TRD           GENMASK(2, 0)

#define CONTROL_DATA_CTRL                       0x0494
/* Control data size mask. */
#define         CONTROL_DATA_CTRL_SIZE          GENMASK(15, 0)

#define CTRL_VERSION                            0x800
#define         CTRL_VERSION_REV                GENMASK(7, 0)

/* Available hardware features of the controller. */
#define CTRL_FEATURES                           0x804
/* Support for NV-DDR2/3 work mode. */
#define         CTRL_FEATURES_NVDDR_2_3         BIT(28)
/* Support for NV-DDR work mode. */
#define         CTRL_FEATURES_NVDDR             BIT(27)
/* Support for asynchronous work mode. */
#define         CTRL_FEATURES_ASYNC             BIT(26)
/* Number of banks supported by the hardware. */
#define         CTRL_FEATURES_N_BANKS           GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define         CTRL_FEATURES_DMA_DWITH64       BIT(21)
/* Availability of the Control Data feature. */
#define         CTRL_FEATURES_CONTROL_DATA      BIT(10)

/* BCH Engine identification register 0 - correction strengths. */
#define BCH_CFG_0                               0x838
#define         BCH_CFG_0_CORR_CAP_0            GENMASK(7, 0)
#define         BCH_CFG_0_CORR_CAP_1            GENMASK(15, 8)
#define         BCH_CFG_0_CORR_CAP_2            GENMASK(23, 16)
#define         BCH_CFG_0_CORR_CAP_3            GENMASK(31, 24)

/* BCH Engine identification register 1 - correction strengths. */
#define BCH_CFG_1                               0x83C
#define         BCH_CFG_1_CORR_CAP_4            GENMASK(7, 0)
#define         BCH_CFG_1_CORR_CAP_5            GENMASK(15, 8)
#define         BCH_CFG_1_CORR_CAP_6            GENMASK(23, 16)
#define         BCH_CFG_1_CORR_CAP_7            GENMASK(31, 24)

/* BCH Engine identification register 2 - sector sizes. */
#define BCH_CFG_2                               0x840
#define         BCH_CFG_2_SECT_0                GENMASK(15, 0)
#define         BCH_CFG_2_SECT_1                GENMASK(31, 16)

/* BCH Engine identification register 3. */
#define BCH_CFG_3                               0x844
#define         BCH_CFG_3_METADATA_SIZE         GENMASK(23, 16)

/* Ready/Busy# line status. */
#define RBN_SETINGS                             0x1004

/* Common settings. */
#define COMMON_SET                              0x1008
/* 16 bit device connected to the NAND Flash interface. */
#define         COMMON_SET_DEVICE_16BIT         BIT(8)

/* Skip_bytes registers. */
#define SKIP_BYTES_CONF                         0x100C
#define         SKIP_BYTES_MARKER_VALUE         GENMASK(31, 16)
#define         SKIP_BYTES_NUM_OF_BYTES         GENMASK(7, 0)

#define SKIP_BYTES_OFFSET                       0x1010
#define         SKIP_BYTES_OFFSET_VALUE         GENMASK(23, 0)

/* Timings configuration. */
#define ASYNC_TOGGLE_TIMINGS                    0x101c
#define         ASYNC_TOGGLE_TIMINGS_TRH        GENMASK(28, 24)
#define         ASYNC_TOGGLE_TIMINGS_TRP        GENMASK(20, 16)
#define         ASYNC_TOGGLE_TIMINGS_TWH        GENMASK(12, 8)
#define         ASYNC_TOGGLE_TIMINGS_TWP        GENMASK(4, 0)

#define TIMINGS0                                0x1024
#define         TIMINGS0_TADL                   GENMASK(31, 24)
#define         TIMINGS0_TCCS                   GENMASK(23, 16)
#define         TIMINGS0_TWHR                   GENMASK(15, 8)
#define         TIMINGS0_TRHW                   GENMASK(7, 0)

#define TIMINGS1                                0x1028
#define         TIMINGS1_TRHZ                   GENMASK(31, 24)
#define         TIMINGS1_TWB                    GENMASK(23, 16)
#define         TIMINGS1_TVDLY                  GENMASK(7, 0)

#define TIMINGS2                                0x102c
#define         TIMINGS2_TFEAT                  GENMASK(25, 16)
#define         TIMINGS2_CS_HOLD_TIME           GENMASK(13, 8)
#define         TIMINGS2_CS_SETUP_TIME          GENMASK(5, 0)

/* Configuration of the resynchronization of slave DLL of PHY. */
#define DLL_PHY_CTRL                            0x1034
#define         DLL_PHY_CTRL_DLL_RST_N          BIT(24)
#define         DLL_PHY_CTRL_EXTENDED_WR_MODE   BIT(17)
#define         DLL_PHY_CTRL_EXTENDED_RD_MODE   BIT(16)
#define         DLL_PHY_CTRL_RS_HIGH_WAIT_CNT   GENMASK(11, 8)
#define         DLL_PHY_CTRL_RS_IDLE_CNT        GENMASK(7, 0)

/* Register controlling DQ related timing. */
#define PHY_DQ_TIMING                           0x2000
/* Register controlling DQS related timing. */
#define PHY_DQS_TIMING                          0x2004
#define         PHY_DQS_TIMING_DQS_SEL_OE_END   GENMASK(3, 0)
#define         PHY_DQS_TIMING_PHONY_DQS_SEL    BIT(16)
#define         PHY_DQS_TIMING_USE_PHONY_DQS    BIT(20)

/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL                      0x2008
#define         PHY_GATE_LPBK_CTRL_RDS          GENMASK(24, 19)

/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL                     0x200C
#define         PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)

/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL                      0x2010

/* This register handles the global control settings for the PHY. */
#define PHY_CTRL                                0x2080
#define         PHY_CTRL_SDR_DQS                BIT(14)
#define         PHY_CTRL_PHONY_DQS              GENMASK(9, 4)

/*
 * This register handles the global control settings
 * for the termination selects for reads.
 */
#define PHY_TSEL                                0x2084

/* Generic command layout. */
#define GCMD_LAY_CS                     GENMASK_ULL(11, 8)
/*
 * This bit informs the minicontroller whether it has to wait for tWB
 * after sending the last CMD/ADDR/DATA in the sequence.
 */
#define GCMD_LAY_TWB                    BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR                  GENMASK_ULL(5, 0)

/* Generic CMD sequence type. */
#define         GCMD_LAY_INSTR_CMD      0
/* Generic ADDR sequence type. */
#define         GCMD_LAY_INSTR_ADDR     1
/* Generic data transfer sequence type. */
#define         GCMD_LAY_INSTR_DATA     2

/* Command opcode field of a generic command when the instruction type is CMD. */
#define GCMD_LAY_INPUT_CMD              GENMASK_ULL(23, 16)

/* Generic command address sequence - address fields. */
#define GCMD_LAY_INPUT_ADDR             GENMASK_ULL(63, 16)
/* Generic command address sequence - address size. */
#define GCMD_LAY_INPUT_ADDR_SIZE        GENMASK_ULL(13, 11)

/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR                        BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define         GCMD_DIR_READ           0
/* Write transfer direction of generic command data sequence. */
#define         GCMD_DIR_WRITE          1

/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN                     BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE                  GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT                   GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE                  GENMASK_ULL(55, 40)

/* CDMA descriptor fields. */
/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE           0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR              0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD              0x2200

/* Flash pointer memory shift. */
#define CDMA_CFPTR_MEM_SHIFT    24
/* Flash pointer memory mask. */
#define CDMA_CFPTR_MEM          GENMASK(26, 24)

/*
 * Command DMA descriptor flag. If set, an interrupt is issued after
 * the descriptor has been processed.
 */
#define CDMA_CF_INT             BIT(8)
/*
 * Command DMA descriptor flag - the next descriptor
 * address field is valid and descriptor processing should continue.
 */
#define CDMA_CF_CONT            BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER      BIT(10)

/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP            BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL            BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP             BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT            BIT(10)
/*
 * Maximum amount of correction applied to one ECC sector.
 * It is part of command descriptor status.
 */
#define CDMA_CS_MAXERR          GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE            BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR             BIT(0)

/* Status of operation - OK. */
#define STAT_OK                 0
/* Status of operation - FAIL. */
#define STAT_FAIL               2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR         3
/* Status of operation - page erased. */
#define STAT_ERASED             5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR           6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN            7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY               0xFF

#define BCH_MAX_NUM_CORR_CAPS           8
#define BCH_MAX_NUM_SECTOR_SIZES        2

struct cadence_nand_timings {
        u32 async_toggle_timings;
        u32 timings0;
        u32 timings1;
        u32 timings2;
        u32 dll_phy_ctrl;
        u32 phy_ctrl;
        u32 phy_dqs_timing;
        u32 phy_gate_lpbk_ctrl;
};

/* Command DMA descriptor. */
struct cadence_nand_cdma_desc {
        /* Next descriptor address. */
        u64 next_pointer;

        /* Flash address is a 32-bit address comprising BANK and ROW ADDR. */
        u32 flash_pointer;
        /* Field appears in HPNFC version 13. */
        u16 bank;
        u16 rsvd0;

        /* Operation the controller needs to perform. */
        u16 command_type;
        u16 rsvd1;
        /* Flags for operation of this command. */
        u16 command_flags;
        u16 rsvd2;

        /* System/host memory address required for data DMA commands. */
        u64 memory_pointer;

        /* Status of operation. */
        u32 status;
        u32 rsvd3;

        /* Address pointer to sync buffer location. */
        u64 sync_flag_pointer;

        /* Controls the buffer sync mechanism. */
        u32 sync_arguments;
        u32 rsvd4;

        /* Control data pointer. */
        u64 ctrl_data_ptr;
};

/* Interrupt status. */
struct cadence_nand_irq_status {
        /* Thread operation complete status. */
        u32 trd_status;
        /* Thread operation error. */
        u32 trd_error;
        /* Controller status. */
        u32 status;
};

/* Cadence NAND flash controller capabilities obtained from driver data. */
struct cadence_nand_dt_devdata {
        /* Skew value of the output signals of the NAND Flash interface. */
        u32 if_skew;
        /* Indicates whether the slave DMA interface is connected to a DMA engine. */
        unsigned int has_dma:1;
};

/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
        /* Maximum number of banks supported by hardware. */
        u8 max_banks;
        /* Slave and Master DMA data width in bytes (4 or 8). */
        u8 data_dma_width;
        /* Control Data feature supported. */
        bool data_control_supp;
        /* Is PHY type DLL. */
        bool is_phy_type_dll;
};

struct cdns_nand_ctrl {
        struct device *dev;
        struct nand_controller controller;
        struct cadence_nand_cdma_desc *cdma_desc;
        /* IP capability. */
        const struct cadence_nand_dt_devdata *caps1;
        struct cdns_nand_caps caps2;
        u8 ctrl_rev;
        dma_addr_t dma_cdma_desc;
        u8 *buf;
        u32 buf_size;
        u8 curr_corr_str_idx;

        /* Register interface. */
        void __iomem *reg;

        struct {
                void __iomem *virt;
                dma_addr_t dma;
        } io;

        int irq;
        /* Interrupts that have happened. */
        struct cadence_nand_irq_status irq_status;
        /* Interrupts we are waiting for. */
        struct cadence_nand_irq_status irq_mask;
        struct completion complete;
        /* Protect irq_mask and irq_status. */
        spinlock_t irq_lock;

        int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
        struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
        struct nand_ecc_caps ecc_caps;

        int curr_trans_type;

        struct dma_chan *dmac;

        u32 nf_clk_rate;
        /*
         * Estimated Board delay. The value includes the total
         * round trip delay for the signals and is used for deciding on values
         * associated with data read capture.
         */
        u32 board_delay;

        struct nand_chip *selected_chip;

        unsigned long assigned_cs;
        struct list_head chips;
        u8 bch_metadata_size;
};

struct cdns_nand_chip {
        struct cadence_nand_timings timings;
        struct nand_chip chip;
        u8 nsels;
        struct list_head node;

        /*
         * Part of the OOB area of a NAND flash memory page.
         * This part is available for the user to read or write.
         */
        u32 avail_oob_size;

        /* Sector size. Several sectors make up mtd->writesize. */
        u32 sector_size;
        u32 sector_count;

        /* Offset of BBM. */
        u8 bbm_offs;
        /* Number of bytes reserved for BBM. */
        u8 bbm_len;
        /* ECC strength index. */
        u8 corr_str_idx;

        u8 cs[] __counted_by(nsels);
};

struct ecc_info {
        int (*calc_ecc_bytes)(int step_size, int strength);
        int max_step_size;
};

static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
        return container_of(chip, struct cdns_nand_chip, chip);
}

static inline struct
cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
{
        return container_of(controller, struct cdns_nand_ctrl, controller);
}

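/*
 * Check whether a buffer can be handed to slave DMA directly: it must
 * be virtually mapped and aligned to the DMA data width and to the DMA
 * transfer size granularity.
 */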
static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
                        u32 buf_len)
{
        u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;

        return buf && virt_addr_valid(buf) &&
                likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
                likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}

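/*
 * Poll a controller register until the bits in @mask are cleared (when
 * @is_clear is true) or set (when it is false), or until @timeout_us
 * expires.
 */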
static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
                                       u32 reg_offset, u32 timeout_us,
                                       u32 mask, bool is_clear)
{
        u32 val;
        int ret;

        ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
                                         val, !(val & mask) == is_clear,
                                         10, timeout_us);

        if (ret < 0) {
                dev_err(cdns_ctrl->dev,
                        "Timeout while waiting for reg %x with mask %x is clear %d\n",
                        reg_offset, mask, is_clear);
        }

        return ret;
}

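/*
 * Enable or disable the hardware ECC engine. The controller must not
 * be busy when the configuration is changed.
 */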
static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
                                       bool enable)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

        if (enable)
                reg |= ECC_CONFIG_0_ECC_EN;
        else
                reg &= ~ECC_CONFIG_0_ECC_EN;

        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

        return 0;
}

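/*
 * Select one of the correction strengths supported by the BCH engine.
 * The register write is skipped if the requested index is already set.
 */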
static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
                                          u8 corr_str_idx)
{
        u32 reg;

        if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
                return;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
        reg &= ~ECC_CONFIG_0_CORR_STR;
        reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);

        cdns_ctrl->curr_corr_str_idx = corr_str_idx;
}

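/*
 * Translate an ECC strength in bits into the controller's correction
 * strength index. Returns -1 if the strength is not supported.
 */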
static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
                                             u8 strength)
{
        int i, corr_str_idx = -1;

        for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
                if (cdns_ctrl->ecc_strengths[i] == strength) {
                        corr_str_idx = i;
                        break;
                }
        }

        return corr_str_idx;
}

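/* Program the marker value written in place of the skipped bytes. */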
static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
                                            u16 marker_value)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
        reg &= ~SKIP_BYTES_MARKER_VALUE;
        reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
                          marker_value);

        writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);

        return 0;
}

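/*
 * Configure the skip bytes feature: how many bytes, at which page
 * offset, the controller skips while programming. The driver uses this
 * to protect the bad block marker area.
 */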
static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
                                            u8 num_of_bytes,
                                            u32 offset_value,
                                            int enable)
{
        u32 reg, skip_bytes_offset;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        if (!enable) {
                num_of_bytes = 0;
                offset_value = 0;
        }

        reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
        reg &= ~SKIP_BYTES_NUM_OF_BYTES;
        reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
                          num_of_bytes);
        skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
                                       offset_value);

        writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
        writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);

        return 0;
}

/* Function enables/disables hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
                                             bool enable,
                                             u8 bitflips_threshold)
{
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);

        if (enable)
                reg |= ECC_CONFIG_0_ERASE_DET_EN;
        else
                reg &= ~ECC_CONFIG_0_ERASE_DET_EN;

        writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
        writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
}

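/* Select between 8-bit and 16-bit access to the NAND flash device. */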
static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
                                           bool bit_bus16)
{
        u32 reg;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);

        if (!bit_bus16)
                reg &= ~COMMON_SET_DEVICE_16BIT;
        else
                reg |= COMMON_SET_DEVICE_16BIT;
        writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);

        return 0;
}

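/*
 * Acknowledge interrupts by writing the reported controller, thread
 * completion and thread error status bits back to their registers.
 */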
static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
                             struct cadence_nand_irq_status *irq_status)
{
        writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
        writel_relaxed(irq_status->trd_status,
                       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
        writel_relaxed(irq_status->trd_error,
                       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
}

static void
cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
                             struct cadence_nand_irq_status *irq_status)
{
        irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
        irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
                                               + TRD_COMP_INT_STATUS);
        irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
                                              + TRD_ERR_INT_STATUS);
}

static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
                        struct cadence_nand_irq_status *irq_status)
{
        cadence_nand_read_int_status(cdns_ctrl, irq_status);

        return irq_status->status || irq_status->trd_status ||
                irq_status->trd_error;
}

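/* Clear the software copies of interrupt status and mask under the lock. */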
static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
{
        unsigned long flags;

        spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
        memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
        memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
        spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
}

/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device.
 */
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
        struct cdns_nand_ctrl *cdns_ctrl = dev_id;
        struct cadence_nand_irq_status irq_status;
        irqreturn_t result = IRQ_NONE;

        spin_lock(&cdns_ctrl->irq_lock);

        if (irq_detected(cdns_ctrl, &irq_status)) {
                /* Handle interrupt. */
                /* First acknowledge it. */
                cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
                /* Status in the device context for someone to read. */
                cdns_ctrl->irq_status.status |= irq_status.status;
                cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
                cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
                /* Notify anyone who cares that it happened. */
                complete(&cdns_ctrl->complete);
                /* Tell the OS that we've handled this. */
                result = IRQ_HANDLED;
        }
        spin_unlock(&cdns_ctrl->irq_lock);

        return result;
}

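/* Enable the interrupt sources the caller wants to wait for. */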
static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
                                      struct cadence_nand_irq_status *irq_mask)
{
        writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
                       cdns_ctrl->reg + INTR_ENABLE);

        writel_relaxed(irq_mask->trd_error,
                       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
}

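/*
 * Wait up to 10 seconds for an enabled interrupt to fire and return the
 * accumulated status; log status and mask values on timeout.
 */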
static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
                          struct cadence_nand_irq_status *irq_mask,
                          struct cadence_nand_irq_status *irq_status)
{
        unsigned long timeout = msecs_to_jiffies(10000);
        unsigned long time_left;

        time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
                                                timeout);

        *irq_status = cdns_ctrl->irq_status;
        if (time_left == 0) {
                /* Timeout error. */
                dev_err(cdns_ctrl->dev, "timeout occurred:\n");
                dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
                        irq_status->status, irq_mask->status);
                dev_err(cdns_ctrl->dev,
                        "\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
                        irq_status->trd_status, irq_mask->trd_status);
                dev_err(cdns_ctrl->dev,
                        "\ttrd_error = 0x%x, trd_error mask = 0x%x\n",
                        irq_status->trd_error, irq_mask->trd_error);
        }
}

/* Execute generic command on NAND controller. */
static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
                                         u8 chip_nr,
                                         u64 mini_ctrl_cmd)
{
        u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;

        mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
        mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
        mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        cadence_nand_reset_irq(cdns_ctrl);

        writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
        writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);

        /* Select generic command. */
        reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
        /* Thread number. */
        reg |= FIELD_PREP(CMD_REG0_TN, 0);

        /* Issue command. */
        writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

        return 0;
}

/* Wait for data on slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
                                     u8 *out_sdma_trd,
                                     u32 *out_sdma_size)
{
        struct cadence_nand_irq_status irq_mask, irq_status;

        irq_mask.trd_status = 0;
        irq_mask.trd_error = 0;
        irq_mask.status = INTR_STATUS_SDMA_TRIGG
                | INTR_STATUS_SDMA_ERR
                | INTR_STATUS_UNSUPP_CMD;

        cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
        cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
        if (irq_status.status == 0) {
                dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
                return -ETIMEDOUT;
        }

        if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
                *out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
                *out_sdma_trd = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
                *out_sdma_trd =
                        FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
        } else {
                dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
                        irq_status.status);
                return -EIO;
        }

        return 0;
}

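/*
 * Read the hardware feature register and derive the controller
 * capabilities: number of banks, DMA data width, Control Data support
 * and PHY type.
 */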
static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);

        cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);

        if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
                cdns_ctrl->caps2.data_dma_width = 8;
        else
                cdns_ctrl->caps2.data_dma_width = 4;

        if (reg & CTRL_FEATURES_CONTROL_DATA)
                cdns_ctrl->caps2.data_control_supp = true;

        if (reg & (CTRL_FEATURES_NVDDR_2_3
                   | CTRL_FEATURES_NVDDR))
                cdns_ctrl->caps2.is_phy_type_dll = true;
}

/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
                               char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
                               dma_addr_t ctrl_data_ptr, u16 ctype)
{
        struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;

        memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));

        /* Set fields for one descriptor. */
        cdma_desc->flash_pointer = flash_ptr;
        if (cdns_ctrl->ctrl_rev >= 13)
                cdma_desc->bank = nf_mem;
        else
                cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);

        cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
        cdma_desc->command_flags |= CDMA_CF_INT;

        cdma_desc->memory_pointer = mem_ptr;
        cdma_desc->status = 0;
        cdma_desc->sync_flag_pointer = 0;
        cdma_desc->sync_arguments = 0;

        cdma_desc->command_type = ctype;
        cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}

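/* Translate a failed CDMA descriptor status into a STAT_* code. */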
static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
                                        u32 desc_status)
{
        if (desc_status & CDMA_CS_ERP)
                return STAT_ERASED;

        if (desc_status & CDMA_CS_UNCE)
                return STAT_ECC_UNCORR;

        if (desc_status & CDMA_CS_ERR) {
                dev_err(cdns_ctrl->dev, "CDMA desc error flag detected.\n");
                return STAT_FAIL;
        }

        if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
                return STAT_ECC_CORR;

        return STAT_FAIL;
}

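/*
 * Check the descriptor status after a CDMA operation and return the
 * resulting STAT_* code; STAT_BUSY means the descriptor has not
 * completed.
 */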
static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
        u8 status = STAT_BUSY;

        if (desc_ptr->status & CDMA_CS_FAIL) {
                status = cadence_nand_check_desc_error(cdns_ctrl,
                                                       desc_ptr->status);
                dev_err(cdns_ctrl->dev, "CDMA error %x\n", desc_ptr->status);
        } else if (desc_ptr->status & CDMA_CS_COMP) {
                /* Descriptor finished with no errors. */
                if (desc_ptr->command_flags & CDMA_CF_CONT) {
                        dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
                        status = STAT_UNKNOWN;
                } else {
                        /* Last descriptor. */
                        status = STAT_OK;
                }
        }

        return status;
}

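/*
 * Program the descriptor address into the command registers and issue
 * a CDMA command on the given thread.
 */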
static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
                                  u8 thread)
{
        u32 reg;
        int status;

        /* Wait for thread ready. */
        status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
                                             1000000,
                                             BIT(thread), true);
        if (status)
                return status;

        cadence_nand_reset_irq(cdns_ctrl);
        reinit_completion(&cdns_ctrl->complete);

        writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
                       cdns_ctrl->reg + CMD_REG2);
        writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);

        /* Select CDMA mode. */
        reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
        /* Thread number. */
        reg |= FIELD_PREP(CMD_REG0_TN, thread);
        /* Issue command. */
        writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);

        return 0;
}

/* Send CDMA command and wait for completion. */
static u32
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
                                u8 thread)
{
        struct cadence_nand_irq_status irq_mask, irq_status = {0};
        int status;

        irq_mask.trd_status = BIT(thread);
        irq_mask.trd_error = BIT(thread);
        irq_mask.status = INTR_STATUS_CDMA_TERR;

        cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);

        status = cadence_nand_cdma_send(cdns_ctrl, thread);
        if (status)
                return status;

        cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);

        if (irq_status.status == 0 && irq_status.trd_status == 0 &&
            irq_status.trd_error == 0) {
                dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
                return -ETIMEDOUT;
        }
        if (irq_status.status & irq_mask.status) {
                dev_err(cdns_ctrl->dev, "CDMA command failed\n");
                return -EIO;
        }

        return 0;
}

/*
 * ECC size depends on configured ECC strength and on maximum supported
 * ECC step size.
 */
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
        int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);

        return ALIGN(nbytes, 2);
}

#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
        static int \
        cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
                                                    int strength)\
        {\
                return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
        }

CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)

/* Function reads BCH capabilities. */
static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
{
        struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
        int max_step_size = 0, nstrengths, i;
        u32 reg;

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
        cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
        if (cdns_ctrl->bch_metadata_size < 4) {
                dev_err(cdns_ctrl->dev,
                        "Driver needs at least 4 bytes of BCH metadata\n");
                return -EIO;
        }

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
        cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
        cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
        cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
        cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
        cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
        cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
        cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
        cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);

        reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
        cdns_ctrl->ecc_stepinfos[0].stepsize =
                FIELD_GET(BCH_CFG_2_SECT_0, reg);

        cdns_ctrl->ecc_stepinfos[1].stepsize =
                FIELD_GET(BCH_CFG_2_SECT_1, reg);

        nstrengths = 0;
        for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
                if (cdns_ctrl->ecc_strengths[i] != 0)
                        nstrengths++;
        }

        ecc_caps->nstepinfos = 0;
        for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
                /* ECC strengths are common for all step infos. */
                cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
                cdns_ctrl->ecc_stepinfos[i].strengths =
                        cdns_ctrl->ecc_strengths;

                if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
                        ecc_caps->nstepinfos++;

                if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
                        max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
        }
        ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];

        switch (max_step_size) {
        case 256:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
                break;
        case 512:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
                break;
        case 1024:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
                break;
        case 2048:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
                break;
        case 4096:
                ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
                break;
        default:
                dev_err(cdns_ctrl->dev,
                        "Unsupported sector size (ecc step size) %d\n",
                        max_step_size);
                return -EIO;
        }

        return 0;
}

/* Hardware initialization. */
static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
{
        int status;
        u32 reg;

        status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                             1000000,
                                             CTRL_STATUS_INIT_COMP, false);
        if (status)
                return status;

        reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
        cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);

        dev_info(cdns_ctrl->dev,
                 "%s: cadence nand controller version reg %x\n",
                 __func__, reg);

        /* Disable cache and multiplane. */
        writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
        writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);

        /* Clear all interrupts. */
        writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);

        cadence_nand_get_caps(cdns_ctrl);
        if (cadence_nand_read_bch_caps(cdns_ctrl))
                return -EIO;

#ifndef CONFIG_64BIT
        if (cdns_ctrl->caps2.data_dma_width == 8) {
                dev_err(cdns_ctrl->dev,
                        "cannot access 64-bit dma on !64-bit architectures");
                return -EIO;
        }
#endif

        /*
         * Set IO width access to 8 bits, because during SW device
         * discovery the width access is expected to be 8 bits.
         */
        status = cadence_nand_set_access_width16(cdns_ctrl, false);

        return status;
}

#define TT_MAIN_OOB_AREAS       2
#define TT_RAW_PAGE             3
#define TT_BBM                  4
#define TT_MAIN_OOB_AREA_EXT    5

/* Prepare size of data to transfer. */
static void
cadence_nand_prepare_data_size(struct nand_chip *chip,
                               int transfer_type)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);
        u32 sec_size = 0, offset = 0, sec_cnt = 1;
        u32 last_sec_size = cdns_chip->sector_size;
        u32 data_ctrl_size = 0;
        u32 reg = 0;

        if (cdns_ctrl->curr_trans_type == transfer_type)
                return;

        switch (transfer_type) {
        case TT_MAIN_OOB_AREA_EXT:
                sec_cnt = cdns_chip->sector_count;
                sec_size = cdns_chip->sector_size;
                data_ctrl_size = cdns_chip->avail_oob_size;
                break;
        case TT_MAIN_OOB_AREAS:
                sec_cnt = cdns_chip->sector_count;
                last_sec_size = cdns_chip->sector_size
                        + cdns_chip->avail_oob_size;
                sec_size = cdns_chip->sector_size;
                break;
        case TT_RAW_PAGE:
                last_sec_size = mtd->writesize + mtd->oobsize;
                break;
        case TT_BBM:
                offset = mtd->writesize + cdns_chip->bbm_offs;
                last_sec_size = 8;
                break;
        }

        reg = 0;
        reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
        reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
        writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);

        reg = 0;
        reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
        reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
        writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);

        if (cdns_ctrl->caps2.data_control_supp) {
                reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
                reg &= ~CONTROL_DATA_CTRL_SIZE;
                reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
                writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
        }

        cdns_ctrl->curr_trans_type = transfer_type;
}

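/*
 * Map the data buffer (and the optional control data buffer), run a
 * CDMA read or write and return the resulting STAT_* code.
 */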
static int
cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
                           int page, void *buf, void *ctrl_dat, u32 buf_size,
                           u32 ctrl_dat_size, enum dma_data_direction dir,
                           bool with_ecc)
{
        dma_addr_t dma_buf, dma_ctrl_dat = 0;
        u8 thread_nr = chip_nr;
        int status;
        u16 ctype;

        if (dir == DMA_FROM_DEVICE)
                ctype = CDMA_CT_RD;
        else
                ctype = CDMA_CT_WR;

        cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);

        dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
        if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
                dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
                return -EIO;
        }

        if (ctrl_dat && ctrl_dat_size) {
                dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
                                              ctrl_dat_size, dir);
                if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
                        dma_unmap_single(cdns_ctrl->dev, dma_buf,
                                         buf_size, dir);
                        dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
                        return -EIO;
                }
        }

        cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
                                       dma_buf, dma_ctrl_dat, ctype);

        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);

        dma_unmap_single(cdns_ctrl->dev, dma_buf,
                         buf_size, dir);

        if (ctrl_dat && ctrl_dat_size)
                dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
                                 ctrl_dat_size, dir);
        if (status)
                return status;

        return cadence_nand_cdma_finish(cdns_ctrl);
}

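/*
 * Write the precomputed timing registers; the PHY DLL registers are
 * programmed only when the PHY is of the DLL type.
 */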
static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
                                     struct cadence_nand_timings *t)
{
        writel_relaxed(t->async_toggle_timings,
                       cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
        writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
        writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
        writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);

        if (cdns_ctrl->caps2.is_phy_type_dll)
                writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);

        writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);

        if (cdns_ctrl->caps2.is_phy_type_dll) {
                writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
                writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
                writel_relaxed(t->phy_dqs_timing,
                               cdns_ctrl->reg + PHY_DQS_TIMING);
                writel_relaxed(t->phy_gate_lpbk_ctrl,
                               cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
                writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
                               cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
                writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
        }
}

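/*
 * Apply the per-chip timings, ECC strength and erase detection
 * settings when switching to a different target chip.
 */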
static int cadence_nand_select_target(struct nand_chip *chip)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

        if (chip == cdns_ctrl->selected_chip)
                return 0;

        if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
                                        1000000,
                                        CTRL_STATUS_CTRL_BUSY, true))
                return -ETIMEDOUT;

        cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);

        cadence_nand_set_ecc_strength(cdns_ctrl,
                                      cdns_chip->corr_str_idx);

        cadence_nand_set_erase_detection(cdns_ctrl, true,
                                         chip->ecc.strength);

        cdns_ctrl->curr_trans_type = -1;
        cdns_ctrl->selected_chip = chip;

        return 0;
}

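/* Erase the block containing @page using a CDMA erase descriptor. */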
static int cadence_nand_erase(struct nand_chip *chip, u32 page)
{
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        int status;
        u8 thread_nr = cdns_chip->cs[chip->cur_cs];

        cadence_nand_cdma_desc_prepare(cdns_ctrl,
                                       cdns_chip->cs[chip->cur_cs],
                                       page, 0, 0,
                                       CDMA_CT_ERASE);
        status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
        if (status) {
                dev_err(cdns_ctrl->dev, "erase operation failed\n");
                return -EIO;
        }

        status = cadence_nand_cdma_finish(cdns_ctrl);
        if (status)
                return status;

        return 0;
}

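/* Read the bad block marker of @page into @buf at the BBM offset. */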
static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
{
        int status;
        struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
        struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
        struct mtd_info *mtd = nand_to_mtd(chip);

        cadence_nand_prepare_data_size(chip, TT_BBM);

        cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);

        /*
         * Read only the bad block marker, from the offset
         * defined by the memory manufacturer.
         */
1414         status = cadence_nand_cdma_transfer(cdns_ctrl,
1415                                             cdns_chip->cs[chip->cur_cs],
1416                                             page, cdns_ctrl->buf, NULL,
1417                                             mtd->oobsize,
1418                                             0, DMA_FROM_DEVICE, false);
1419         if (status) {
1420                 dev_err(cdns_ctrl->dev, "read BBM failed\n");
1421                 return -EIO;
1422         }
1423
1424         memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
1425
1426         return 0;
1427 }
1428
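/*
 * Program one page. When the caller's buffer is DMA-able and the
 * controller supports separate data/OOB control, data and OOB are sent
 * directly; otherwise both are staged through the bounce buffer. The
 * bad block marker bytes are skipped by the controller and replaced
 * with the marker value taken from the OOB buffer (or 0xFFFF).
 */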
1429 static int cadence_nand_write_page(struct nand_chip *chip,
1430                                    const u8 *buf, int oob_required,
1431                                    int page)
1432 {
1433         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1434         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1435         struct mtd_info *mtd = nand_to_mtd(chip);
1436         int status;
1437         u16 marker_val = 0xFFFF;
1438
1439         status = cadence_nand_select_target(chip);
1440         if (status)
1441                 return status;
1442
1443         cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1444                                          mtd->writesize
1445                                          + cdns_chip->bbm_offs,
1446                                          1);
1447
1448         if (oob_required) {
1449                 marker_val = *(u16 *)(chip->oob_poi
1450                                       + cdns_chip->bbm_offs);
1451         } else {
1452                 /* Set oob data to 0xFF. */
1453                 memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
1454                        cdns_chip->avail_oob_size);
1455         }
1456
1457         cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
1458
1459         cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1460
1461         if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1462             cdns_ctrl->caps2.data_control_supp) {
1463                 u8 *oob;
1464
1465                 if (oob_required)
1466                         oob = chip->oob_poi;
1467                 else
1468                         oob = cdns_ctrl->buf + mtd->writesize;
1469
1470                 status = cadence_nand_cdma_transfer(cdns_ctrl,
1471                                                     cdns_chip->cs[chip->cur_cs],
1472                                                     page, (void *)buf, oob,
1473                                                     mtd->writesize,
1474                                                     cdns_chip->avail_oob_size,
1475                                                     DMA_TO_DEVICE, true);
1476                 if (status) {
1477                         dev_err(cdns_ctrl->dev, "write page failed\n");
1478                         return -EIO;
1479                 }
1480
1481                 return 0;
1482         }
1483
1484         if (oob_required) {
1485                 /* Transfer the data to the oob area. */
1486                 memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
1487                        cdns_chip->avail_oob_size);
1488         }
1489
1490         memcpy(cdns_ctrl->buf, buf, mtd->writesize);
1491
1492         cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1493
1494         return cadence_nand_cdma_transfer(cdns_ctrl,
1495                                           cdns_chip->cs[chip->cur_cs],
1496                                           page, cdns_ctrl->buf, NULL,
1497                                           mtd->writesize
1498                                           + cdns_chip->avail_oob_size,
1499                                           0, DMA_TO_DEVICE, true);
1500 }
1501
1502 static int cadence_nand_write_oob(struct nand_chip *chip, int page)
1503 {
1504         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1505         struct mtd_info *mtd = nand_to_mtd(chip);
1506
1507         memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
1508
1509         return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
1510 }
1511
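/*
 * Raw page write. The flash stores sectors in a syndrome layout with
 * payload and ECC bytes interleaved per ECC step, so the MTD-ordered
 * input is rearranged in the bounce buffer before issuing a raw
 * (ECC-bypassing) CDMA transfer.
 */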
1512 static int cadence_nand_write_page_raw(struct nand_chip *chip,
1513                                        const u8 *buf, int oob_required,
1514                                        int page)
1515 {
1516         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1517         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1518         struct mtd_info *mtd = nand_to_mtd(chip);
1519         int writesize = mtd->writesize;
1520         int oobsize = mtd->oobsize;
1521         int ecc_steps = chip->ecc.steps;
1522         int ecc_size = chip->ecc.size;
1523         int ecc_bytes = chip->ecc.bytes;
1524         void *tmp_buf = cdns_ctrl->buf;
1525         int oob_skip = cdns_chip->bbm_len;
1526         size_t size = writesize + oobsize;
1527         int i, pos, len;
1528         int status = 0;
1529
1530         status = cadence_nand_select_target(chip);
1531         if (status)
1532                 return status;
1533
1534         /*
1535          * Fill the buffer with 0xff first, unless both data and OOB are
1536          * transferred (i.e. a full page write). This simplifies the logic.
1537          */
1538         if (!buf || !oob_required)
1539                 memset(tmp_buf, 0xff, size);
1540
1541         cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1542
1543         /* Arrange the buffer for syndrome payload/ecc layout. */
1544         if (buf) {
1545                 for (i = 0; i < ecc_steps; i++) {
1546                         pos = i * (ecc_size + ecc_bytes);
1547                         len = ecc_size;
1548
1549                         if (pos >= writesize)
1550                                 pos += oob_skip;
1551                         else if (pos + len > writesize)
1552                                 len = writesize - pos;
1553
1554                         memcpy(tmp_buf + pos, buf, len);
1555                         buf += len;
1556                         if (len < ecc_size) {
1557                                 len = ecc_size - len;
1558                                 memcpy(tmp_buf + writesize + oob_skip, buf,
1559                                        len);
1560                                 buf += len;
1561                         }
1562                 }
1563         }
1564
1565         if (oob_required) {
1566                 const u8 *oob = chip->oob_poi;
1567                 u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1568                         (cdns_chip->sector_size + chip->ecc.bytes)
1569                         + cdns_chip->sector_size + oob_skip;
1570
1571                 /* BBM at the beginning of the OOB area. */
1572                 memcpy(tmp_buf + writesize, oob, oob_skip);
1573
1574                 /* OOB free. */
1575                 memcpy(tmp_buf + oob_data_offset, oob,
1576                        cdns_chip->avail_oob_size);
1577                 oob += cdns_chip->avail_oob_size;
1578
1579                 /* OOB ECC. */
1580                 for (i = 0; i < ecc_steps; i++) {
1581                         pos = ecc_size + i * (ecc_size + ecc_bytes);
1582                         if (i == (ecc_steps - 1))
1583                                 pos += cdns_chip->avail_oob_size;
1584
1585                         len = ecc_bytes;
1586
1587                         if (pos >= writesize)
1588                                 pos += oob_skip;
1589                         else if (pos + len > writesize)
1590                                 len = writesize - pos;
1591
1592                         memcpy(tmp_buf + pos, oob, len);
1593                         oob += len;
1594                         if (len < ecc_bytes) {
1595                                 len = ecc_bytes - len;
1596                                 memcpy(tmp_buf + writesize + oob_skip, oob,
1597                                        len);
1598                                 oob += len;
1599                         }
1600                 }
1601         }
1602
1603         cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1604
1605         return cadence_nand_cdma_transfer(cdns_ctrl,
1606                                           cdns_chip->cs[chip->cur_cs],
1607                                           page, cdns_ctrl->buf, NULL,
1608                                           mtd->writesize +
1609                                           mtd->oobsize,
1610                                           0, DMA_TO_DEVICE, false);
1611 }
1612
1613 static int cadence_nand_write_oob_raw(struct nand_chip *chip,
1614                                       int page)
1615 {
1616         return cadence_nand_write_page_raw(chip, NULL, true, page);
1617 }
1618
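/*
 * ECC-protected page read. On correctable errors the maximum number of
 * bitflips reported in the CDMA descriptor status is returned and added
 * to the MTD ECC statistics; the raw bad block marker is re-read
 * afterwards so OOB consumers see the manufacturer marker, not the
 * value substituted by the skip-bytes logic.
 */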
1619 static int cadence_nand_read_page(struct nand_chip *chip,
1620                                   u8 *buf, int oob_required, int page)
1621 {
1622         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1623         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1624         struct mtd_info *mtd = nand_to_mtd(chip);
1625         int status = 0;
1626         int ecc_err_count = 0;
1627
1628         status = cadence_nand_select_target(chip);
1629         if (status)
1630                 return status;
1631
1632         cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1633                                          mtd->writesize
1634                                          + cdns_chip->bbm_offs, 1);
1635
1636         /*
1637          * If data buffer can be accessed by DMA and data_control feature
1638          * is supported then transfer data and oob directly.
1639          */
1640         if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1641             cdns_ctrl->caps2.data_control_supp) {
1642                 u8 *oob;
1643
1644                 if (oob_required)
1645                         oob = chip->oob_poi;
1646                 else
1647                         oob = cdns_ctrl->buf + mtd->writesize;
1648
1649                 cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1650                 status = cadence_nand_cdma_transfer(cdns_ctrl,
1651                                                     cdns_chip->cs[chip->cur_cs],
1652                                                     page, buf, oob,
1653                                                     mtd->writesize,
1654                                                     cdns_chip->avail_oob_size,
1655                                                     DMA_FROM_DEVICE, true);
1656         /* Otherwise use bounce buffer. */
1657         } else {
1658                 cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1659                 status = cadence_nand_cdma_transfer(cdns_ctrl,
1660                                                     cdns_chip->cs[chip->cur_cs],
1661                                                     page, cdns_ctrl->buf,
1662                                                     NULL, mtd->writesize
1663                                                     + cdns_chip->avail_oob_size,
1664                                                     0, DMA_FROM_DEVICE, true);
1665
1666                 memcpy(buf, cdns_ctrl->buf, mtd->writesize);
1667                 if (oob_required)
1668                         memcpy(chip->oob_poi,
1669                                cdns_ctrl->buf + mtd->writesize,
1670                                mtd->oobsize);
1671         }
1672
1673         switch (status) {
1674         case STAT_ECC_UNCORR:
1675                 mtd->ecc_stats.failed++;
1676                 ecc_err_count++;
1677                 break;
1678         case STAT_ECC_CORR:
1679                 ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
1680                                           cdns_ctrl->cdma_desc->status);
1681                 mtd->ecc_stats.corrected += ecc_err_count;
1682                 break;
1683         case STAT_ERASED:
1684         case STAT_OK:
1685                 break;
1686         default:
1687                 dev_err(cdns_ctrl->dev, "read page failed\n");
1688                 return -EIO;
1689         }
1690
1691         if (oob_required)
1692                 if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
1693                         return -EIO;
1694
1695         return ecc_err_count;
1696 }
1697
1698 /* Reads OOB data from the device. */
1699 static int cadence_nand_read_oob(struct nand_chip *chip, int page)
1700 {
1701         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1702
1703         return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
1704 }
1705
1706 static int cadence_nand_read_page_raw(struct nand_chip *chip,
1707                                       u8 *buf, int oob_required, int page)
1708 {
1709         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1710         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1711         struct mtd_info *mtd = nand_to_mtd(chip);
1712         int oob_skip = cdns_chip->bbm_len;
1713         int writesize = mtd->writesize;
1714         int ecc_steps = chip->ecc.steps;
1715         int ecc_size = chip->ecc.size;
1716         int ecc_bytes = chip->ecc.bytes;
1717         void *tmp_buf = cdns_ctrl->buf;
1718         int i, pos, len;
1719         int status = 0;
1720
1721         status = cadence_nand_select_target(chip);
1722         if (status)
1723                 return status;
1724
1725         cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1726
1727         cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1728         status = cadence_nand_cdma_transfer(cdns_ctrl,
1729                                             cdns_chip->cs[chip->cur_cs],
1730                                             page, cdns_ctrl->buf, NULL,
1731                                             mtd->writesize
1732                                             + mtd->oobsize,
1733                                             0, DMA_FROM_DEVICE, false);
1734
1735         switch (status) {
1736         case STAT_ERASED:
1737         case STAT_OK:
1738                 break;
1739         default:
1740                 dev_err(cdns_ctrl->dev, "read raw page failed\n");
1741                 return -EIO;
1742         }
1743
1744         /* Arrange the buffer for syndrome payload/ecc layout. */
1745         if (buf) {
1746                 for (i = 0; i < ecc_steps; i++) {
1747                         pos = i * (ecc_size + ecc_bytes);
1748                         len = ecc_size;
1749
1750                         if (pos >= writesize)
1751                                 pos += oob_skip;
1752                         else if (pos + len > writesize)
1753                                 len = writesize - pos;
1754
1755                         memcpy(buf, tmp_buf + pos, len);
1756                         buf += len;
1757                         if (len < ecc_size) {
1758                                 len = ecc_size - len;
1759                                 memcpy(buf, tmp_buf + writesize + oob_skip,
1760                                        len);
1761                                 buf += len;
1762                         }
1763                 }
1764         }
1765
1766         if (oob_required) {
1767                 u8 *oob = chip->oob_poi;
1768                 u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1769                         (cdns_chip->sector_size + chip->ecc.bytes)
1770                         + cdns_chip->sector_size + oob_skip;
1771
1772                 /* OOB free. */
1773                 memcpy(oob, tmp_buf + oob_data_offset,
1774                        cdns_chip->avail_oob_size);
1775
1776                 /* BBM at the beginning of the OOB area. */
1777                 memcpy(oob, tmp_buf + writesize, oob_skip);
1778
1779                 oob += cdns_chip->avail_oob_size;
1780
1781                 /* OOB ECC */
1782                 for (i = 0; i < ecc_steps; i++) {
1783                         pos = ecc_size + i * (ecc_size + ecc_bytes);
1784                         len = ecc_bytes;
1785
1786                         if (i == (ecc_steps - 1))
1787                                 pos += cdns_chip->avail_oob_size;
1788
1789                         if (pos >= writesize)
1790                                 pos += oob_skip;
1791                         else if (pos + len > writesize)
1792                                 len = writesize - pos;
1793
1794                         memcpy(oob, tmp_buf + pos, len);
1795                         oob += len;
1796                         if (len < ecc_bytes) {
1797                                 len = ecc_bytes - len;
1798                                 memcpy(oob, tmp_buf + writesize + oob_skip,
1799                                        len);
1800                                 oob += len;
1801                         }
1802                 }
1803         }
1804
1805         return 0;
1806 }
1807
1808 static int cadence_nand_read_oob_raw(struct nand_chip *chip,
1809                                      int page)
1810 {
1811         return cadence_nand_read_page_raw(chip, NULL, true, page);
1812 }
1813
1814 static void cadence_nand_slave_dma_transfer_finished(void *data)
1815 {
1816         struct completion *finished = data;
1817
1818         complete(finished);
1819 }
1820
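/*
 * Move data between memory and the controller's slave DMA (SDMA)
 * window using an external DMA channel. The SDMA window is a fixed
 * device address (cdns_ctrl->io.dma), so the transfer is modelled as a
 * memcpy between it and the mapped buffer. Any failure makes the
 * caller fall back to CPU I/O.
 */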
1821 static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
1822                                            void *buf,
1823                                            dma_addr_t dev_dma, size_t len,
1824                                            enum dma_data_direction dir)
1825 {
1826         DECLARE_COMPLETION_ONSTACK(finished);
1827         struct dma_chan *chan;
1828         struct dma_device *dma_dev;
1829         dma_addr_t src_dma, dst_dma, buf_dma;
1830         struct dma_async_tx_descriptor *tx;
1831         dma_cookie_t cookie;
1832
1833         chan = cdns_ctrl->dmac;
1834         dma_dev = chan->device;
1835
1836         buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
1837         if (dma_mapping_error(dma_dev->dev, buf_dma)) {
1838                 dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1839                 goto err;
1840         }
1841
1842         if (dir == DMA_FROM_DEVICE) {
1843                 src_dma = cdns_ctrl->io.dma;
1844                 dst_dma = buf_dma;
1845         } else {
1846                 src_dma = buf_dma;
1847                 dst_dma = cdns_ctrl->io.dma;
1848         }
1849
1850         tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
1851                                        DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
1852         if (!tx) {
1853                 dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
1854                 goto err_unmap;
1855         }
1856
1857         tx->callback = cadence_nand_slave_dma_transfer_finished;
1858         tx->callback_param = &finished;
1859
1860         cookie = dmaengine_submit(tx);
1861         if (dma_submit_error(cookie)) {
1862                 dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
1863                 goto err_unmap;
1864         }
1865
1866         dma_async_issue_pending(cdns_ctrl->dmac);
1867         wait_for_completion(&finished);
1868
1869         dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1870
1871         return 0;
1872
1873 err_unmap:
1874         dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
1875
1876 err:
1877         dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
1878
1879         return -EIO;
1880 }
1881
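/*
 * Read @len bytes from the SDMA window. Without a master DMA engine
 * the FIFO is drained by CPU I/O in 32-bit or 64-bit words; whatever
 * the controller exposes beyond the word-aligned part of @len is read
 * into the bounce buffer so the unaligned tail can be copied out and
 * the FIFO is fully consumed.
 */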
1882 static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
1883                                  u8 *buf, int len)
1884 {
1885         u8 thread_nr = 0;
1886         u32 sdma_size;
1887         int status;
1888
1889         /* Wait until the slave DMA interface is ready for data transfer. */
1890         status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1891         if (status)
1892                 return status;
1893
1894         if (!cdns_ctrl->caps1->has_dma) {
1895                 u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
1896
1897                 int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
1898
1899                 /* Read the word-aligned part of the data. */
1900                 if (data_dma_width == 4)
1901                         ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1902 #ifdef CONFIG_64BIT
1903                 else
1904                         readsq(cdns_ctrl->io.virt, buf, len_in_words);
1905 #endif
1906
1907                 if (sdma_size > len) {
1908                         int read_bytes = (data_dma_width == 4) ?
1909                                 len_in_words << 2 : len_in_words << 3;
1910
1911                         /* Read the remaining data from the slave DMA interface, if any. */
1912                         if (data_dma_width == 4)
1913                                 ioread32_rep(cdns_ctrl->io.virt,
1914                                              cdns_ctrl->buf,
1915                                              sdma_size / 4 - len_in_words);
1916 #ifdef CONFIG_64BIT
1917                         else
1918                                 readsq(cdns_ctrl->io.virt, cdns_ctrl->buf,
1919                                        sdma_size / 8 - len_in_words);
1920 #endif
1921
1922                         /* Copy the unaligned tail of the data. */
1923                         memcpy(buf + read_bytes, cdns_ctrl->buf,
1924                                len - read_bytes);
1925                 }
1926                 return 0;
1927         }
1928
1929         if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1930                 status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
1931                                                          cdns_ctrl->io.dma,
1932                                                          len, DMA_FROM_DEVICE);
1933                 if (status == 0)
1934                         return 0;
1935
1936                 dev_warn(cdns_ctrl->dev,
1937                          "Slave DMA transfer failed, retrying using the bounce buffer\n");
1938         }
1939
1940         /* If DMA transfer is not possible or failed then use bounce buffer. */
1941         status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1942                                                  cdns_ctrl->io.dma,
1943                                                  sdma_size, DMA_FROM_DEVICE);
1944
1945         if (status) {
1946                 dev_err(cdns_ctrl->dev, "Slave DMA transfer failed\n");
1947                 return status;
1948         }
1949
1950         memcpy(buf, cdns_ctrl->buf, len);
1951
1952         return 0;
1953 }
1954
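/*
 * Write @len bytes to the SDMA window; the mirror image of
 * cadence_nand_read_buf(). The controller always expects sdma_size
 * bytes, so the unaligned tail is staged in the bounce buffer and the
 * remainder of the expected size is written from there.
 */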
1955 static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
1956                                   const u8 *buf, int len)
1957 {
1958         u8 thread_nr = 0;
1959         u32 sdma_size;
1960         int status;
1961
1962         /* Wait until the slave DMA interface is ready for data transfer. */
1963         status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1964         if (status)
1965                 return status;
1966
1967         if (!cdns_ctrl->caps1->has_dma) {
1968                 u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
1969
1970                 int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
1971
1972                 if (data_dma_width == 4)
1973                         iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1974 #ifdef CONFIG_64BIT
1975                 else
1976                         writesq(cdns_ctrl->io.virt, buf, len_in_words);
1977 #endif
1978
1979                 if (sdma_size > len) {
1980                         int written_bytes = (data_dma_width == 4) ?
1981                                 len_in_words << 2 : len_in_words << 3;
1982
1983                         /* Copy the unaligned tail of the data to the bounce buffer. */
1984                         memcpy(cdns_ctrl->buf, buf + written_bytes,
1985                                len - written_bytes);
1986
1987                         /* Write the rest of the data expected by the NAND controller. */
1988                         if (data_dma_width == 4)
1989                                 iowrite32_rep(cdns_ctrl->io.virt,
1990                                               cdns_ctrl->buf,
1991                                               sdma_size / 4 - len_in_words);
1992 #ifdef CONFIG_64BIT
1993                         else
1994                                 writesq(cdns_ctrl->io.virt, cdns_ctrl->buf,
1995                                         sdma_size / 8 - len_in_words);
1996 #endif
1997                 }
1998
1999                 return 0;
2000         }
2001
2002         if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
2003                 status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
2004                                                          cdns_ctrl->io.dma,
2005                                                          len, DMA_TO_DEVICE);
2006                 if (status == 0)
2007                         return 0;
2008
2009                 dev_warn(cdns_ctrl->dev,
2010                          "Slave DMA transfer failed, retrying using the bounce buffer\n");
2011         }
2012
2013         /* If DMA transfer is not possible or failed then use bounce buffer. */
2014         memcpy(cdns_ctrl->buf, buf, len);
2015
2016         status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
2017                                                  cdns_ctrl->io.dma,
2018                                                  sdma_size, DMA_TO_DEVICE);
2019
2020         if (status)
2021                 dev_err(cdns_ctrl->dev, "Slave DMA transfer failed\n");
2022
2023         return status;
2024 }
2025
2026 static int cadence_nand_force_byte_access(struct nand_chip *chip,
2027                                           bool force_8bit)
2028 {
2029         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2030
2031         /*
2032          * Callers of this function do not verify if the NAND is using a 16-bit
2033          * or an 8-bit bus for normal operations, so we need to take care of that
2034          * here by leaving the configuration unchanged if the NAND does not have
2035          * the NAND_BUSWIDTH_16 flag set.
2036          */
2037         if (!(chip->options & NAND_BUSWIDTH_16))
2038                 return 0;
2039
2040         return cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
2041 }
2042
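/*
 * The helpers below implement the patterns of the generic command
 * layer: each one builds a 64-bit mini-controller command word from
 * the GCMD_* fields and hands it to cadence_nand_generic_cmd_send().
 */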
2043 static int cadence_nand_cmd_opcode(struct nand_chip *chip,
2044                                    const struct nand_subop *subop)
2045 {
2046         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2047         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2048         const struct nand_op_instr *instr;
2049         unsigned int op_id = 0;
2050         u64 mini_ctrl_cmd = 0;
2051         int ret;
2052
2053         instr = &subop->instrs[op_id];
2054
2055         if (instr->delay_ns > 0)
2056                 mini_ctrl_cmd |= GCMD_LAY_TWB;
2057
2058         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2059                                     GCMD_LAY_INSTR_CMD);
2060         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
2061                                     instr->ctx.cmd.opcode);
2062
2063         ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2064                                             cdns_chip->cs[chip->cur_cs],
2065                                             mini_ctrl_cmd);
2066         if (ret)
2067                 dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
2068                         instr->ctx.cmd.opcode);
2069
2070         return ret;
2071 }
2072
2073 static int cadence_nand_cmd_address(struct nand_chip *chip,
2074                                     const struct nand_subop *subop)
2075 {
2076         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2077         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2078         const struct nand_op_instr *instr;
2079         unsigned int op_id = 0;
2080         u64 mini_ctrl_cmd = 0;
2081         unsigned int offset, naddrs;
2082         u64 address = 0;
2083         const u8 *addrs;
2084         int ret;
2085         int i;
2086
2087         instr = &subop->instrs[op_id];
2088
2089         if (instr->delay_ns > 0)
2090                 mini_ctrl_cmd |= GCMD_LAY_TWB;
2091
2092         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2093                                     GCMD_LAY_INSTR_ADDR);
2094
2095         offset = nand_subop_get_addr_start_off(subop, op_id);
2096         naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2097         addrs = &instr->ctx.addr.addrs[offset];
2098
2099         for (i = 0; i < naddrs; i++)
2100                 address |= (u64)addrs[i] << (8 * i);
2101
2102         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
2103                                     address);
2104         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
2105                                     naddrs - 1);
2106
2107         ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2108                                             cdns_chip->cs[chip->cur_cs],
2109                                             mini_ctrl_cmd);
2110         if (ret)
2111                 dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
2112
2113         return ret;
2114 }
2115
2116 static int cadence_nand_cmd_erase(struct nand_chip *chip,
2117                                   const struct nand_subop *subop)
2118 {
2119         unsigned int op_id;
2120
2121         if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
2122                 int i;
2123                 const struct nand_op_instr *instr = NULL;
2124                 unsigned int offset, naddrs;
2125                 const u8 *addrs;
2126                 u32 page = 0;
2127
2128                 instr = &subop->instrs[1];
2129                 offset = nand_subop_get_addr_start_off(subop, 1);
2130                 naddrs = nand_subop_get_num_addr_cyc(subop, 1);
2131                 addrs = &instr->ctx.addr.addrs[offset];
2132
2133                 for (i = 0; i < naddrs; i++)
2134                         page |= (u32)addrs[i] << (8 * i);
2135
2136                 return cadence_nand_erase(chip, page);
2137         }
2138
2139         /*
2140          * If it is not an erase operation then handle the operation
2141          * by calling the exec_op function.
2142          */
2143         for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2144                 int ret;
2145                 const struct nand_operation nand_op = {
2146                         .cs = chip->cur_cs,
2147                         .instrs =  &subop->instrs[op_id],
2148                         .ninstrs = 1};
2149                 ret = chip->controller->ops->exec_op(chip, &nand_op, false);
2150                 if (ret)
2151                         return ret;
2152         }
2153
2154         return 0;
2155 }
2156
2157 static int cadence_nand_cmd_data(struct nand_chip *chip,
2158                                  const struct nand_subop *subop)
2159 {
2160         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2161         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2162         const struct nand_op_instr *instr;
2163         unsigned int offset, op_id = 0;
2164         u64 mini_ctrl_cmd = 0;
2165         int len = 0;
2166         int ret;
2167
2168         instr = &subop->instrs[op_id];
2169
2170         if (instr->delay_ns > 0)
2171                 mini_ctrl_cmd |= GCMD_LAY_TWB;
2172
2173         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2174                                     GCMD_LAY_INSTR_DATA);
2175
2176         if (instr->type == NAND_OP_DATA_OUT_INSTR)
2177                 mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
2178                                             GCMD_DIR_WRITE);
2179
2180         len = nand_subop_get_data_len(subop, op_id);
2181         offset = nand_subop_get_data_start_off(subop, op_id);
2182         mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
2183         mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
2184         if (instr->ctx.data.force_8bit) {
2185                 ret = cadence_nand_force_byte_access(chip, true);
2186                 if (ret) {
2187                         dev_err(cdns_ctrl->dev,
2188                                 "cannot change byte access, generic data cmd failed\n");
2189                         return ret;
2190                 }
2191         }
2192
2193         ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2194                                             cdns_chip->cs[chip->cur_cs],
2195                                             mini_ctrl_cmd);
2196         if (ret) {
2197                 dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
2198                 return ret;
2199         }
2200
2201         if (instr->type == NAND_OP_DATA_IN_INSTR) {
2202                 void *buf = instr->ctx.data.buf.in + offset;
2203
2204                 ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
2205         } else {
2206                 const void *buf = instr->ctx.data.buf.out + offset;
2207
2208                 ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
2209         }
2210
2211         if (ret) {
2212                 dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
2213                 return ret;
2214         }
2215
2216         if (instr->ctx.data.force_8bit) {
2217                 ret = cadence_nand_force_byte_access(chip, false);
2218                 if (ret) {
2219                         dev_err(cdns_ctrl->dev,
2220                                 "cannot change byte access, generic data cmd failed\n");
2221                 }
2222         }
2223
2224         return ret;
2225 }
2226
2227 static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
2228                                     const struct nand_subop *subop)
2229 {
2230         int status;
2231         unsigned int op_id = 0;
2232         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2233         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2234         const struct nand_op_instr *instr = &subop->instrs[op_id];
2235         u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
2236
2237         status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
2238                                              timeout_us,
2239                                              BIT(cdns_chip->cs[chip->cur_cs]),
2240                                              false);
2241         return status;
2242 }
2243
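/*
 * Pattern table for exec_op: full erase sequences are intercepted and
 * executed as CDMA erase descriptors, while standalone command,
 * address, data-in/out and wait-ready instructions are translated into
 * generic mini-controller commands.
 */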
2244 static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
2245         NAND_OP_PARSER_PATTERN(
2246                 cadence_nand_cmd_erase,
2247                 NAND_OP_PARSER_PAT_CMD_ELEM(false),
2248                 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
2249                 NAND_OP_PARSER_PAT_CMD_ELEM(false),
2250                 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2251         NAND_OP_PARSER_PATTERN(
2252                 cadence_nand_cmd_opcode,
2253                 NAND_OP_PARSER_PAT_CMD_ELEM(false)),
2254         NAND_OP_PARSER_PATTERN(
2255                 cadence_nand_cmd_address,
2256                 NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
2257         NAND_OP_PARSER_PATTERN(
2258                 cadence_nand_cmd_data,
2259                 NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
2260         NAND_OP_PARSER_PATTERN(
2261                 cadence_nand_cmd_data,
2262                 NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
2263         NAND_OP_PARSER_PATTERN(
2264                 cadence_nand_cmd_waitrdy,
2265                 NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
2266         );
2267
2268 static int cadence_nand_exec_op(struct nand_chip *chip,
2269                                 const struct nand_operation *op,
2270                                 bool check_only)
2271 {
2272         if (!check_only) {
2273                 int status = cadence_nand_select_target(chip);
2274
2275                 if (status)
2276                         return status;
2277         }
2278
2279         return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
2280                                       check_only);
2281 }
2282
2283 static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
2284                                        struct mtd_oob_region *oobregion)
2285 {
2286         struct nand_chip *chip = mtd_to_nand(mtd);
2287         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2288
2289         if (section)
2290                 return -ERANGE;
2291
2292         oobregion->offset = cdns_chip->bbm_len;
2293         oobregion->length = cdns_chip->avail_oob_size
2294                 - cdns_chip->bbm_len;
2295
2296         return 0;
2297 }
2298
2299 static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2300                                       struct mtd_oob_region *oobregion)
2301 {
2302         struct nand_chip *chip = mtd_to_nand(mtd);
2303         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2304
2305         if (section)
2306                 return -ERANGE;
2307
2308         oobregion->offset = cdns_chip->avail_oob_size;
2309         oobregion->length = chip->ecc.total;
2310
2311         return 0;
2312 }
2313
2314 static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
2315         .free = cadence_nand_ooblayout_free,
2316         .ecc = cadence_nand_ooblayout_ecc,
2317 };
2318
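/*
 * Convert a timing value [ps] into a clock cycle count minus one,
 * rounded up; the timing registers hold "cycles - 1" values.
 */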
2319 static int calc_cycl(u32 timing, u32 clock)
2320 {
2321         if (timing == 0 || clock == 0)
2322                 return 0;
2323
2324         if ((timing % clock) > 0)
2325                 return timing / clock;
2326         else
2327                 return timing / clock - 1;
2328 }
2329
2330 /* Calculate max data valid window. */
2331 static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2332                                 u32 board_delay_skew_min, u32 ext_mode)
2333 {
2334         if (ext_mode == 0)
2335                 clk_period /= 2;
2336
2337         return (trp_cnt + 1) * clk_period + trhoh_min +
2338                 board_delay_skew_min;
2339 }
2340
2341 /* Calculate data valid window. */
2342 static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2343                             u32 trea_max, u32 ext_mode)
2344 {
2345         if (ext_mode == 0)
2346                 clk_period /= 2;
2347
2348         return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
2349 }
2350
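/*
 * Compute the SDR interface timings for @conf. The code looks for a
 * read data sampling point inside the data valid window; when optimal
 * tRP/tRH values cannot be met at the current controller clock, the
 * extended read/write modes stretch the RE/WE pulses by whole clock
 * cycles instead.
 */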
2351 static int
2352 cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
2353                              const struct nand_interface_config *conf)
2354 {
2355         const struct nand_sdr_timings *sdr;
2356         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2357         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2358         struct cadence_nand_timings *t = &cdns_chip->timings;
2359         u32 reg;
2360         u32 board_delay = cdns_ctrl->board_delay;
2361         u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
2362                                             cdns_ctrl->nf_clk_rate);
2363         u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
2364         u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
2365         u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
2366         u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
2367         u32 if_skew = cdns_ctrl->caps1->if_skew;
2368         u32 board_delay_skew_min = board_delay - if_skew;
2369         u32 board_delay_skew_max = board_delay + if_skew;
2370         u32 dqs_sampl_res, phony_dqs_mod;
2371         u32 tdvw, tdvw_min, tdvw_max;
2372         u32 ext_rd_mode, ext_wr_mode;
2373         u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
2374         u32 sampling_point;
2375
2376         sdr = nand_get_sdr_timings(conf);
2377         if (IS_ERR(sdr))
2378                 return PTR_ERR(sdr);
2379
2380         memset(t, 0, sizeof(*t));
2381         /* Sampling point calculation. */
2382
2383         if (cdns_ctrl->caps2.is_phy_type_dll)
2384                 phony_dqs_mod = 2;
2385         else
2386                 phony_dqs_mod = 1;
2387
2388         dqs_sampl_res = clk_period / phony_dqs_mod;
2389
2390         tdvw_min = sdr->tREA_max + board_delay_skew_max;
2391         /*
2392          * The idea of these calculations is to get the optimum values
2393          * for the tRP and tRH timings. If it is NOT possible to sample data
2394          * with optimal tRP/tRH settings, the parameters will be extended.
2395          * If clk_period is 50ns (the lowest value) this condition is met
2396          * for SDR timing modes 1, 2, 3, 4 and 5.
2397          * If clk_period is 20ns the condition is met only for SDR timing
2398          * mode 5.
2399          */
2400         if (sdr->tRC_min <= clk_period &&
2401             sdr->tRP_min <= (clk_period / 2) &&
2402             sdr->tREH_min <= (clk_period / 2)) {
2403                 /* Performance mode. */
2404                 ext_rd_mode = 0;
2405                 tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2406                                  sdr->tREA_max, ext_rd_mode);
2407                 tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
2408                                          board_delay_skew_min,
2409                                          ext_rd_mode);
2410                 /*
2411                  * Check if data valid window and sampling point can be found
2412                  * and are not on the edge (i.e. we have hold margin).
2413                  * If not, extend the tRP timings.
2414                  */
2415                 if (tdvw > 0) {
2416                         if (tdvw_max <= tdvw_min ||
2417                             (tdvw_max % dqs_sampl_res) == 0) {
2418                                 /*
2419                                  * No valid sampling point, so the RE pulse needs
2420                                  * to be widened by half a clock cycle.
2421                                  */
2422                                 ext_rd_mode = 1;
2423                         }
2424                 } else {
2425                         /*
2426                          * There is no valid window in which to sample the data,
2427                          * so tRP needs to be widened.
2428                          * Very conservative calculations are performed here.
2429                          */
2430                         trp_cnt = (sdr->tREA_max + board_delay_skew_max
2431                                    + dqs_sampl_res) / clk_period;
2432                         ext_rd_mode = 1;
2433                 }
2434
2435         } else {
2436                 /* Extended read mode. */
2437                 u32 trh;
2438
2439                 ext_rd_mode = 1;
2440                 trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
2441                 trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
2442                 if (sdr->tREH_min >= trh)
2443                         trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
2444                 else
2445                         trh_cnt = calc_cycl(trh, clk_period);
2446
2447                 tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2448                                  sdr->tREA_max, ext_rd_mode);
2449                 /*
2450                  * Check if data valid window and sampling point can be found
2451                  * or, if it is at the edge, check if the previous point is valid
2452                  * - if not, extend the tRP timings.
2453                  */
2454                 if (tdvw > 0) {
2455                         tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2456                                                  sdr->tRHOH_min,
2457                                                  board_delay_skew_min,
2458                                                  ext_rd_mode);
2459
2460                         if ((((tdvw_max / dqs_sampl_res)
2461                               * dqs_sampl_res) <= tdvw_min) ||
2462                             (((tdvw_max % dqs_sampl_res) == 0) &&
2463                              (((tdvw_max / dqs_sampl_res - 1)
2464                                * dqs_sampl_res) <= tdvw_min))) {
2465                                 /*
2466                                  * The data valid window is narrower than the
2467                                  * sampling resolution and does not hit any
2468                                  * sampling point. To make sure a sampling
2469                                  * point is found, the RE low pulse width is
2470                                  * extended by one clock cycle.
2471                                  */
2472                                 trp_cnt = trp_cnt + 1;
2473                         }
2474                 } else {
2475                         /*
2476                          * There is no valid window in which to sample the data.
2477                          * tRP needs to be widened.
2478                          * Very conservative calculations are performed here.
2479                          */
2480                         trp_cnt = (sdr->tREA_max + board_delay_skew_max
2481                                    + dqs_sampl_res) / clk_period;
2482                 }
2483         }
2484
2485         tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2486                                  sdr->tRHOH_min,
2487                                  board_delay_skew_min, ext_rd_mode);
2488
2489         if (sdr->tWC_min <= clk_period &&
2490             (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
2491             (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
2492                 ext_wr_mode = 0;
2493         } else {
2494                 u32 twh;
2495
2496                 ext_wr_mode = 1;
2497                 twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
2498                 if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
2499                         twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
2500                                             clk_period);
2501
2502                 twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
2503                 if (sdr->tWH_min >= twh)
2504                         twh = sdr->tWH_min;
2505
2506                 twh_cnt = calc_cycl(twh + if_skew, clk_period);
2507         }
2508
2509         reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
2510         reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
2511         reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
2512         reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
2513         t->async_toggle_timings = reg;
2514         dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
2515
2516         tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
2517         tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
2518         twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
2519         trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
2520         reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
2521
2522         /*
2523          * If the timing exceeds the delay field in the timing register
2524          * then use the maximum value.
2525          */
2526         if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
2527                 reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
2528         else
2529                 reg |= TIMINGS0_TCCS;
2530
2531         reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
2532         reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
2533         t->timings0 = reg;
2534         dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
2535
2536         /* The following is related to a single signal, so skew is not needed. */
2537         trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
2538         trhz_cnt = trhz_cnt + 1;
2539         twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
2540         /*
2541          * Because of the two-stage syncflop the value must be increased by 3;
2542          * the first value is related to the synchronization, the second
2543          * to the output interface delay.
2544          */
2545         twb_cnt = twb_cnt + 3 + 5;
2546         /*
2547          * The following is related to the WE edge of the random data input
2548          * sequence, so skew is not needed.
2549          */
2550         tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
2551         reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
2552         reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
2553         reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
2554         t->timings1 = reg;
2555         dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
2556
2557         tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
2558         if (tfeat_cnt < twb_cnt)
2559                 tfeat_cnt = twb_cnt;
2560
2561         tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
2562         tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
2563
2564         reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
2565         reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
2566         reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
2567         t->timings2 = reg;
2568         dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
2569
2570         if (cdns_ctrl->caps2.is_phy_type_dll) {
2571                 reg = DLL_PHY_CTRL_DLL_RST_N;
2572                 if (ext_wr_mode)
2573                         reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
2574                 if (ext_rd_mode)
2575                         reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
2576
2577                 reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
2578                 reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
2579                 t->dll_phy_ctrl = reg;
2580                 dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
2581         }
2582
2583         /* Sampling point calculation. */
2584         if ((tdvw_max % dqs_sampl_res) > 0)
2585                 sampling_point = tdvw_max / dqs_sampl_res;
2586         else
2587                 sampling_point = (tdvw_max / dqs_sampl_res - 1);
2588
2589         if (sampling_point * dqs_sampl_res > tdvw_min) {
2590                 dll_phy_dqs_timing =
2591                         FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
2592                 dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
2593                 phony_dqs_timing = sampling_point / phony_dqs_mod;
2594
2595                 if ((sampling_point % 2) > 0) {
2596                         dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
2597                         if ((tdvw_max % dqs_sampl_res) == 0)
2598                                 /*
2599                                  * Calculation for a sampling point at the edge
2600                                  * of the data window, when it is an odd number.
2601                                  */
2602                                 phony_dqs_timing = (tdvw_max / dqs_sampl_res)
2603                                         / phony_dqs_mod - 1;
2604
2605                         if (!cdns_ctrl->caps2.is_phy_type_dll)
2606                                 phony_dqs_timing--;
2607
2608                 } else {
2609                         phony_dqs_timing--;
2610                 }
2611                 rd_del_sel = phony_dqs_timing + 3;
2612         } else {
2613                 dev_warn(cdns_ctrl->dev,
2614                          "ERROR: cannot find a valid sampling point\n");
2615         }
2616
2617         reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
2618         if (cdns_ctrl->caps2.is_phy_type_dll)
2619                 reg  |= PHY_CTRL_SDR_DQS;
2620         t->phy_ctrl = reg;
2621         dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
2622
2623         if (cdns_ctrl->caps2.is_phy_type_dll) {
2624                 dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
2625                 dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
2626                 dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
2627                         dll_phy_dqs_timing);
2628                 t->phy_dqs_timing = dll_phy_dqs_timing;
2629
2630                 reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
2631                 dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
2632                         reg);
2633                 t->phy_gate_lpbk_ctrl = reg;
2634
2635                 dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
2636                         PHY_DLL_MASTER_CTRL_BYPASS_MODE);
2637                 dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
2638         }
2639
2640         return 0;
2641 }
2642
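/*
 * Finalize the per-chip configuration once the NAND is identified:
 * pick the ECC step size and strength from the controller capabilities,
 * reserve two even-aligned OOB bytes for the bad block marker and
 * derive the free OOB size, capped by the BCH metadata size.
 */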
2643 static int cadence_nand_attach_chip(struct nand_chip *chip)
2644 {
2645         struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2646         struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2647         u32 ecc_size;
2648         struct mtd_info *mtd = nand_to_mtd(chip);
2649         int ret;
2650
2651         if (chip->options & NAND_BUSWIDTH_16) {
2652                 ret = cadence_nand_set_access_width16(cdns_ctrl, true);
2653                 if (ret)
2654                         return ret;
2655         }
2656
2657         chip->bbt_options |= NAND_BBT_USE_FLASH;
2658         chip->bbt_options |= NAND_BBT_NO_OOB;
2659         chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2660
2661         chip->options |= NAND_NO_SUBPAGE_WRITE;
2662
2663         cdns_chip->bbm_offs = chip->badblockpos;
2664         cdns_chip->bbm_offs &= ~0x01;
2665         /* This value should be an even number. */
2666         cdns_chip->bbm_len = 2;
2667
2668         ret = nand_ecc_choose_conf(chip,
2669                                    &cdns_ctrl->ecc_caps,
2670                                    mtd->oobsize - cdns_chip->bbm_len);
2671         if (ret) {
2672                 dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
2673                 return ret;
2674         }
2675
2676         dev_dbg(cdns_ctrl->dev,
2677                 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
2678                 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
2679
2680         /* Error correction configuration. */
2681         cdns_chip->sector_size = chip->ecc.size;
2682         cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
2683         ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
2684
2685         cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
2686
2687         if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
2688                 cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
2689
2690         if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
2691             > mtd->oobsize)
2692                 cdns_chip->avail_oob_size -= 4;
2693
2694         ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
2695         if (ret < 0)
2696                 return -EINVAL;
2697
2698         cdns_chip->corr_str_idx = (u8)ret;
2699
2700         if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
2701                                         1000000,
2702                                         CTRL_STATUS_CTRL_BUSY, true))
2703                 return -ETIMEDOUT;
2704
2705         cadence_nand_set_ecc_strength(cdns_ctrl,
2706                                       cdns_chip->corr_str_idx);
2707
2708         cadence_nand_set_erase_detection(cdns_ctrl, true,
2709                                          chip->ecc.strength);
2710
2711         /* Override the default read operations. */
2712         chip->ecc.read_page = cadence_nand_read_page;
2713         chip->ecc.read_page_raw = cadence_nand_read_page_raw;
2714         chip->ecc.write_page = cadence_nand_write_page;
2715         chip->ecc.write_page_raw = cadence_nand_write_page_raw;
2716         chip->ecc.read_oob = cadence_nand_read_oob;
2717         chip->ecc.write_oob = cadence_nand_write_oob;
2718         chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
2719         chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
2720
2721         if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
2722                 cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
2723
2724         /* Is 32-bit DMA supported? */
2725         ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
2726         if (ret) {
2727                 dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
2728                 return ret;
2729         }
2730
2731         mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
2732
2733         return 0;
2734 }

static const struct nand_controller_ops cadence_nand_controller_ops = {
	.attach_chip = cadence_nand_attach_chip,
	.exec_op = cadence_nand_exec_op,
	.setup_interface = cadence_nand_setup_interface,
};

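/*
 * Initialize one NAND chip described by a child node of the controller:
 * the node's "reg" property lists the chip select line(s) the chip is
 * wired to.
 */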
static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
				  struct device_node *np)
{
	struct cdns_nand_chip *cdns_chip;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	int nsels, ret, i;
	u32 cs;

	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
	if (nsels <= 0) {
		dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
		return -EINVAL;
	}

	/* Allocate the nand chip structure. */
	cdns_chip = devm_kzalloc(cdns_ctrl->dev,
				 struct_size(cdns_chip, cs, nsels),
				 GFP_KERNEL);
	if (!cdns_chip) {
		dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
		return -ENOMEM;
	}

	cdns_chip->nsels = nsels;

	for (i = 0; i < nsels; i++) {
		/* Retrieve CS id. */
		ret = of_property_read_u32_index(np, "reg", i, &cs);
		if (ret) {
			dev_err(cdns_ctrl->dev,
				"could not retrieve reg property: %d\n",
				ret);
			return ret;
		}

		if (cs >= cdns_ctrl->caps2.max_banks) {
			dev_err(cdns_ctrl->dev,
				"invalid reg value: %u (max CS = %d)\n",
				cs, cdns_ctrl->caps2.max_banks);
			return -EINVAL;
		}

		if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
			dev_err(cdns_ctrl->dev,
				"CS %u already assigned\n", cs);
			return -EINVAL;
		}

		cdns_chip->cs[i] = cs;
	}

	chip = &cdns_chip->chip;
	chip->controller = &cdns_ctrl->controller;
	nand_set_flash_node(chip, np);

	mtd = nand_to_mtd(chip);
	mtd->dev.parent = cdns_ctrl->dev;

	/*
	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
	 * in the DT node, this entry will be overwritten in nand_scan_ident().
	 */
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	ret = nand_scan(chip, cdns_chip->nsels);
	if (ret) {
		dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
		return ret;
	}

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(cdns_ctrl->dev,
			"failed to register mtd device: %d\n", ret);
		nand_cleanup(chip);
		return ret;
	}

	list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);

	return 0;
}

static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct cdns_nand_chip *entry, *temp;
	struct nand_chip *chip;
	int ret;

	list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
		chip = &entry->chip;
		ret = mtd_device_unregister(nand_to_mtd(chip));
		WARN_ON(ret);
		nand_cleanup(chip);
		list_del(&entry->node);
	}
}

static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	struct device_node *np = cdns_ctrl->dev->of_node;
	struct device_node *nand_np;
	int max_cs = cdns_ctrl->caps2.max_banks;
	int nchips, ret;

	nchips = of_get_child_count(np);

	if (nchips > max_cs) {
		dev_err(cdns_ctrl->dev,
			"too many NAND chips: %d (max = %d CS)\n",
			nchips, max_cs);
		return -EINVAL;
	}

	for_each_child_of_node(np, nand_np) {
		ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
		if (ret) {
			of_node_put(nand_np);
			cadence_nand_chips_cleanup(cdns_ctrl);
			return ret;
		}
	}

	return 0;
}

static void
cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
{
	/* Mask every interrupt source (only the global enable bit stays set). */
	writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
}

static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
	dma_cap_mask_t mask;
	int ret;

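	/*
	 * The CDMA command descriptor is shared with the controller, so it
	 * is allocated from DMA-coherent memory.
	 */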
	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
						  sizeof(*cdns_ctrl->cdma_desc),
						  &cdns_ctrl->dma_cdma_desc,
						  GFP_KERNEL);
	if (!cdns_ctrl->cdma_desc)
		return -ENOMEM;

	cdns_ctrl->buf_size = SZ_16K;
	cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
	if (!cdns_ctrl->buf) {
		ret = -ENOMEM;
		goto free_buf_desc;
	}

	if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
			     IRQF_SHARED, "cadence-nand-controller",
			     cdns_ctrl)) {
		dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto free_buf;
	}

	spin_lock_init(&cdns_ctrl->irq_lock);
	init_completion(&cdns_ctrl->complete);

	ret = cadence_nand_hw_init(cdns_ctrl);
	if (ret)
		goto disable_irq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

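	/*
	 * When the integration provides a master DMA engine, request any
	 * memcpy-capable channel for moving data to and from the slave DMA
	 * window.
	 */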
	if (cdns_ctrl->caps1->has_dma) {
		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
		if (!cdns_ctrl->dmac) {
			dev_err(cdns_ctrl->dev,
				"Unable to get a DMA channel\n");
			ret = -EBUSY;
			goto disable_irq;
		}
	}

	nand_controller_init(&cdns_ctrl->controller);
	INIT_LIST_HEAD(&cdns_ctrl->chips);

	cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
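	/* 0xFF means no ECC strength has been programmed into the hardware yet. */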
	cdns_ctrl->curr_corr_str_idx = 0xFF;

	ret = cadence_nand_chips_init(cdns_ctrl);
	if (ret) {
		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
			ret);
		goto dma_release_chnl;
	}

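	/*
	 * buf_size now reflects the largest page + OOB seen during chip
	 * init; replace the initial 16K buffer with a zeroed one of the
	 * final size.
	 */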
	kfree(cdns_ctrl->buf);
	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
	if (!cdns_ctrl->buf) {
		ret = -ENOMEM;
		goto dma_release_chnl;
	}

	return 0;

dma_release_chnl:
	if (cdns_ctrl->dmac)
		dma_release_channel(cdns_ctrl->dmac);

disable_irq:
	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);

free_buf:
	kfree(cdns_ctrl->buf);

free_buf_desc:
	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

	return ret;
}

/* Driver exit point. */
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
	cadence_nand_chips_cleanup(cdns_ctrl);
	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
	kfree(cdns_ctrl->buf);
	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);

	if (cdns_ctrl->dmac)
		dma_release_channel(cdns_ctrl->dmac);
}

struct cadence_nand_dt {
	struct cdns_nand_ctrl cdns_ctrl;
	struct clk *clk;
};

static const struct cadence_nand_dt_devdata cadence_nand_default = {
	.if_skew = 0,
	.has_dma = 1,
};

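/*
 * Device-tree resources consumed by the probe path below (a summary
 * derived from the code, not an authoritative binding description): two
 * "reg" regions (controller registers and the slave DMA data window),
 * one interrupt, an "nf_clk" clock, the optional "cdns,board-delay-ps"
 * property, and one child node per NAND chip whose "reg" lists its chip
 * select(s).
 */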
static const struct of_device_id cadence_nand_dt_ids[] = {
	{
		.compatible = "cdns,hp-nfc",
		.data = &cadence_nand_default
	}, {}
};

MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);

static int cadence_nand_dt_probe(struct platform_device *ofdev)
{
	struct resource *res;
	struct cadence_nand_dt *dt;
	struct cdns_nand_ctrl *cdns_ctrl;
	int ret;
	const struct cadence_nand_dt_devdata *devdata;
	u32 val;

	devdata = device_get_match_data(&ofdev->dev);
	if (!devdata) {
		dev_err(&ofdev->dev, "Failed to find the right device id\n");
		return -ENODEV;
	}

	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	cdns_ctrl = &dt->cdns_ctrl;
	cdns_ctrl->caps1 = devdata;

	cdns_ctrl->dev = &ofdev->dev;
	cdns_ctrl->irq = platform_get_irq(ofdev, 0);
	if (cdns_ctrl->irq < 0)
		return cdns_ctrl->irq;

	dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);

	cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
	if (IS_ERR(cdns_ctrl->reg))
		return PTR_ERR(cdns_ctrl->reg);

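	/*
	 * The second memory resource is the slave DMA data window; its bus
	 * address is kept so data transfers can target it directly.
	 */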
	cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
	if (IS_ERR(cdns_ctrl->io.virt))
		return PTR_ERR(cdns_ctrl->io.virt);
	cdns_ctrl->io.dma = res->start;

	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
	if (IS_ERR(dt->clk))
		return PTR_ERR(dt->clk);

	cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);

	ret = of_property_read_u32(ofdev->dev.of_node,
				   "cdns,board-delay-ps", &val);
	if (ret) {
		val = 4830;
		dev_info(cdns_ctrl->dev,
			 "missing cdns,board-delay-ps property, using default %u ps\n",
			 val);
	}
	cdns_ctrl->board_delay = val;

	ret = cadence_nand_init(cdns_ctrl);
	if (ret)
		return ret;

	platform_set_drvdata(ofdev, dt);
	return 0;
}

static void cadence_nand_dt_remove(struct platform_device *ofdev)
{
	struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);

	cadence_nand_remove(&dt->cdns_ctrl);
}

static struct platform_driver cadence_nand_dt_driver = {
	.probe		= cadence_nand_dt_probe,
	.remove_new	= cadence_nand_dt_remove,
	.driver		= {
		.name	= "cadence-nand-controller",
		.of_match_table = cadence_nand_dt_ids,
	},
};

module_platform_driver(cadence_nand_dt_driver);

MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");