/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright 2017 - Free Electrons
 *
 *  Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H

#include <linux/mtd/mtd.h>

struct nand_device;

/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell
 * @pagesize: page size
 * @oobsize: OOB area size
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target (target is a synonym for die)
 * @ntargets: total number of targets exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int max_bad_eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};

#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.max_bad_eraseblocks_per_lun = (mbb),		\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
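
/*
 * Example (illustrative only): memory organization of a hypothetical 1Gbit
 * SLC chip with 2KiB pages, 64 bytes of OOB per page, 64 pages per
 * eraseblock, 1024 eraseblocks per LUN, at most 20 bad blocks per LUN, a
 * single plane and a single LUN/die:
 *
 *	static const struct nand_memory_organization memorg =
 *		NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);
 */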

/**
 * struct nand_row_converter - Information needed to convert an absolute offset
 *			       into a row address
 * @lun_addr_shift: position of the LUN identifier in the row address
 * @eraseblock_addr_shift: position of the eraseblock identifier in the row
 *			   address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};

/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the LUN
 *
 * This information is usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};

/**
 * enum nand_page_io_req_type - Direction of an I/O request
 * @NAND_PAGE_READ: from the chip, to the controller
 * @NAND_PAGE_WRITE: from the controller, to the chip
 */
enum nand_page_io_req_type {
	NAND_PAGE_READ = 0,
	NAND_PAGE_WRITE,
};

/**
 * struct nand_page_io_req - NAND I/O request object
 * @type: the type of page I/O: read or write
 * @pos: the position this I/O request is targeting
 * @dataoffs: the offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from
 * @mode: one of the %MTD_OPS_XXX modes
 * @continuous: no need to start over the operation at the end of each page, the
 * NAND device will automatically prepare the next one
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all the relevant information is already formatted in a convenient way
 * and specific NAND layers can focus on translating it into specific
 * commands/operations.
 */
struct nand_page_io_req {
	enum nand_page_io_req_type type;
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
	bool continuous;
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void);
const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void);

/**
 * enum nand_ecc_engine_type - NAND ECC engine type
 * @NAND_ECC_ENGINE_TYPE_INVALID: Invalid value
 * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
 * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
 * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
 */
enum nand_ecc_engine_type {
	NAND_ECC_ENGINE_TYPE_INVALID,
	NAND_ECC_ENGINE_TYPE_NONE,
	NAND_ECC_ENGINE_TYPE_SOFT,
	NAND_ECC_ENGINE_TYPE_ON_HOST,
	NAND_ECC_ENGINE_TYPE_ON_DIE,
};

/**
 * enum nand_ecc_placement - NAND ECC bytes placement
 * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
 * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
 * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
 *                                  interleaved with regular data in the main
 *                                  area
 */
enum nand_ecc_placement {
	NAND_ECC_PLACEMENT_UNKNOWN,
	NAND_ECC_PLACEMENT_OOB,
	NAND_ECC_PLACEMENT_INTERLEAVED,
};

/**
 * enum nand_ecc_algo - NAND ECC algorithm
 * @NAND_ECC_ALGO_UNKNOWN: Unknown algorithm
 * @NAND_ECC_ALGO_HAMMING: Hamming algorithm
 * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
 * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
 */
enum nand_ecc_algo {
	NAND_ECC_ALGO_UNKNOWN,
	NAND_ECC_ALGO_HAMMING,
	NAND_ECC_ALGO_BCH,
	NAND_ECC_ALGO_RS,
};

/**
 * struct nand_ecc_props - NAND ECC properties
 * @engine_type: ECC engine type
 * @placement: OOB placement (if relevant)
 * @algo: ECC algorithm (if relevant)
 * @strength: ECC strength
 * @step_size: Number of bytes per step
 * @flags: Misc properties
 */
struct nand_ecc_props {
	enum nand_ecc_engine_type engine_type;
	enum nand_ecc_placement placement;
	enum nand_ecc_algo algo;
	unsigned int strength;
	unsigned int step_size;
	unsigned int flags;
};
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
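
/*
 * Example (illustrative only): a chip requiring 8 bits of correction per
 * 512-byte ECC step would advertise its requirements as:
 *
 *	NAND_ECCREQ(8, 512)
 */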

/* NAND ECC misc flags */
#define NAND_ECC_MAXIMIZE_STRENGTH BIT(0)

/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache
 */
struct nand_bbt {
	unsigned long *cache;
};

/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future calls to struct_nand_ops->isbad() return
 *	     true
 * @isbad: check whether a block is bad or not. This method should just read
 *	   the BBM and return whether the block is bad or not based on what it
 *	   reads
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
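
/*
 * Example (sketch, assuming a hypothetical "mynand" specialized layer): the
 * low-level operations are typically wired up once and handed to
 * nanddev_init():
 *
 *	static const struct nand_ops mynand_ops = {
 *		.erase = mynand_erase,
 *		.markbad = mynand_markbad,
 *		.isbad = mynand_isbad,
 *	};
 *
 *	ret = nanddev_init(&mynand->base, &mynand_ops, THIS_MODULE);
 */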

/**
 * struct nand_ecc_context - Context for the ECC engine
 * @conf: basic ECC engine parameters
 * @nsteps: number of ECC steps
 * @total: total number of bytes used for storing ECC codes, this is used by
 *         generic OOB layouts
 * @priv: ECC engine driver private data
 */
struct nand_ecc_context {
	struct nand_ecc_props conf;
	unsigned int nsteps;
	unsigned int total;
	void *priv;
};

/**
 * struct nand_ecc_engine_ops - ECC engine operations
 * @init_ctx: given a desired user configuration for the pointed NAND device,
 *            requests the ECC engine driver to set up a configuration with
 *            values it supports.
 * @cleanup_ctx: clean the context initialized by @init_ctx.
 * @prepare_io_req: is called before reading/writing a page to prepare the I/O
 *                  request to be performed with ECC correction.
 * @finish_io_req: is called after reading/writing a page to terminate the I/O
 *                 request and ensure proper ECC correction.
 */
struct nand_ecc_engine_ops {
	int (*init_ctx)(struct nand_device *nand);
	void (*cleanup_ctx)(struct nand_device *nand);
	int (*prepare_io_req)(struct nand_device *nand,
			      struct nand_page_io_req *req);
	int (*finish_io_req)(struct nand_device *nand,
			     struct nand_page_io_req *req);
};

/**
 * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
 * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
 * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
 *                                         correction, does not need to copy
 *                                         data around
 * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
 *                                        data into its own area before use
 */
enum nand_ecc_engine_integration {
	NAND_ECC_ENGINE_INTEGRATION_INVALID,
	NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
	NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
};

/**
 * struct nand_ecc_engine - ECC engine abstraction for NAND devices
 * @dev: Host device
 * @node: Private field for registration time
 * @ops: ECC engine operations
 * @integration: How the engine is integrated with the host
 *               (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
 * @priv: Private data
 */
struct nand_ecc_engine {
	struct device *dev;
	struct list_head node;
	struct nand_ecc_engine_ops *ops;
	enum nand_ecc_engine_integration integration;
	void *priv;
};

void of_get_nand_ecc_user_config(struct nand_device *nand);
int nand_ecc_init_ctx(struct nand_device *nand);
void nand_ecc_cleanup_ctx(struct nand_device *nand);
int nand_ecc_prepare_io_req(struct nand_device *nand,
			    struct nand_page_io_req *req);
int nand_ecc_finish_io_req(struct nand_device *nand,
			   struct nand_page_io_req *req);
bool nand_ecc_is_strong_enough(struct nand_device *nand);

#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
#else
static inline int
nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	return -ENOTSUPP;
}
static inline int
nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
{
	return -ENOTSUPP;
}
#endif

struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
struct device *nand_ecc_get_engine_dev(struct device *host);

#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
#else
static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
{
	return NULL;
}
#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */

#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
#else
static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
{
	return NULL;
}
#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */

/**
 * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
 * @orig_req: Copy of the original IO request
 * @nand: Related NAND device, to have access to its memory organization
 * @page_buffer_size: Real size of the page buffer to use (can be set by the
 *                    user before the tweaking mechanism initialization)
 * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
 *                   user before the tweaking mechanism initialization)
 * @spare_databuf: Data bounce buffer
 * @spare_oobbuf: OOB bounce buffer
 * @bounce_data: Flag indicating a data bounce buffer is used
 * @bounce_oob: Flag indicating an OOB bounce buffer is used
 */
struct nand_ecc_req_tweak_ctx {
	struct nand_page_io_req orig_req;
	struct nand_device *nand;
	unsigned int page_buffer_size;
	unsigned int oob_buffer_size;
	void *spare_databuf;
	void *spare_oobbuf;
	bool bounce_data;
	bool bounce_oob;
};

int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
			       struct nand_device *nand);
void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
			struct nand_page_io_req *req);
void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
			  struct nand_page_io_req *req);

/**
 * struct nand_ecc - Information relative to the ECC
 * @defaults: Default values, depend on the underlying subsystem
 * @requirements: ECC requirements from the NAND chip perspective
 * @user_conf: User desires in terms of ECC parameters
 * @ctx: ECC context for the ECC engine, derived from the device @requirements,
 *       the @user_conf and the @defaults
 * @ondie_engine: On-die ECC engine reference, if any
 * @engine: ECC engine actually bound
 */
struct nand_ecc {
	struct nand_ecc_props defaults;
	struct nand_ecc_props requirements;
	struct nand_ecc_props user_conf;
	struct nand_ecc_context ctx;
	struct nand_ecc_engine *ondie_engine;
	struct nand_ecc_engine *engine;
};

/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @ecc: NAND ECC object attached to the NAND device
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
 * be filled at device detection time to reflect the NAND device
 * capabilities/requirements. Once this is done, nanddev_init() can be called.
 * It will take care of converting NAND information into MTD ones, which means
 * the specialized NAND layers should never manually tweak
 * struct_nand_device->mtd except for the ->_read/write() hooks.
 */
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc ecc;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};
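
/*
 * Example (sketch, assuming a hypothetical "mynand" specialized layer): the
 * inheritance pattern described above boils down to embedding a nand_device
 * and converting back with container_of():
 *
 *	struct mynand_device {
 *		struct nand_device base;
 *		void *priv;
 *	};
 *
 *	static inline struct mynand_device *
 *	nand_to_mynand(struct nand_device *nand)
 *	{
 *		return container_of(nand, struct mynand_device, base);
 *	}
 */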

/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplify the boilerplate
 * code needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};

/**
 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
 * @mtd: MTD instance
 *
 * Return: the NAND device embedding @mtd.
 */
static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
{
	return container_of(mtd, struct nand_device, mtd);
}

/**
 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
 * @nand: NAND device
 *
 * Return: the MTD device embedded in @nand.
 */
static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
{
	return &nand->mtd;
}

/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}

/**
 * nanddev_page_size() - Get NAND page size
 * @nand: NAND device
 *
 * Return: the page size.
 */
static inline size_t nanddev_page_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize;
}

/**
 * nanddev_per_page_oobsize() - Get NAND OOB size
 * @nand: NAND device
 *
 * Return: the OOB size.
 */
static inline unsigned int
nanddev_per_page_oobsize(const struct nand_device *nand)
{
	return nand->memorg.oobsize;
}

/**
 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
 * @nand: NAND device
 *
 * Return: the number of pages per eraseblock.
 */
static inline unsigned int
nanddev_pages_per_eraseblock(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_pages_per_target() - Get the number of pages per target
 * @nand: NAND device
 *
 * Return: the number of pages per target.
 */
static inline unsigned int
nanddev_pages_per_target(const struct nand_device *nand)
{
	return nand->memorg.pages_per_eraseblock *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.luns_per_target;
}

/**
 * nanddev_eraseblock_size() - Get NAND eraseblock size
 * @nand: NAND device
 *
 * Return: the eraseblock size.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}

/**
 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per LUN.
 */
static inline unsigned int
nanddev_eraseblocks_per_lun(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
 * @nand: NAND device
 *
 * Return: the number of eraseblocks per target.
 */
static inline unsigned int
nanddev_eraseblocks_per_target(const struct nand_device *nand)
{
	return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
}

/**
 * nanddev_target_size() - Get the total size provided by a single target/die
 * @nand: NAND device
 *
 * Return: the total size exposed by a single target/die in bytes.
 */
static inline u64 nanddev_target_size(const struct nand_device *nand)
{
	return (u64)nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun *
	       nand->memorg.pages_per_eraseblock *
	       nand->memorg.pagesize;
}

/**
 * nanddev_ntargets() - Get the total number of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}

/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand.
 */
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}

/**
 * nanddev_size() - Get NAND size
 * @nand: NAND device
 *
 * Return: the total size (in bytes) exposed by @nand.
 */
static inline u64 nanddev_size(const struct nand_device *nand)
{
	return nanddev_target_size(nand) * nanddev_ntargets(nand);
}

/**
 * nanddev_get_memorg() - Extract memory organization info from a NAND device
 * @nand: NAND device
 *
 * This can be used by the upper layer to fill the memorg info before calling
 * nanddev_init().
 *
 * Return: the memorg object embedded in the NAND device.
 */
static inline struct nand_memory_organization *
nanddev_get_memorg(struct nand_device *nand)
{
	return &nand->memorg;
}

/**
 * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
 * @nand: NAND device
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_conf(struct nand_device *nand)
{
	return &nand->ecc.ctx.conf;
}

/**
 * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
 * @nand: NAND device
 */
static inline unsigned int
nanddev_get_ecc_nsteps(struct nand_device *nand)
{
	return nand->ecc.ctx.nsteps;
}

/**
 * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
 * @nand: NAND device
 */
static inline unsigned int
nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
{
	return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
}

/**
 * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
 *                                  device
 * @nand: NAND device
 */
static inline const struct nand_ecc_props *
nanddev_get_ecc_requirements(struct nand_device *nand)
{
	return &nand->ecc.requirements;
}

/**
 * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
 *                                  device
 * @nand: NAND device
 * @reqs: Requirements
 */
static inline void
nanddev_set_ecc_requirements(struct nand_device *nand,
			     const struct nand_ecc_props *reqs)
{
	nand->ecc.requirements = *reqs;
}

int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
		 struct module *owner);
void nanddev_cleanup(struct nand_device *nand);

/**
 * nanddev_register() - Register a NAND device
 * @nand: NAND device
 *
 * Register a NAND device.
 * This function is just a wrapper around mtd_device_register()
 * registering the MTD device embedded in @nand.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_register(struct nand_device *nand)
{
	return mtd_device_register(&nand->mtd, NULL, 0);
}

/**
 * nanddev_unregister() - Unregister a NAND device
 * @nand: NAND device
 *
 * Unregister a NAND device.
 * This function is just a wrapper around mtd_device_unregister()
 * unregistering the MTD device embedded in @nand.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
static inline int nanddev_unregister(struct nand_device *nand)
{
	return mtd_device_unregister(&nand->mtd);
}

/**
 * nanddev_set_of_node() - Attach a DT node to a NAND device
 * @nand: NAND device
 * @np: DT node
 *
 * Attach a DT node to a NAND device.
 */
static inline void nanddev_set_of_node(struct nand_device *nand,
				       struct device_node *np)
{
	mtd_set_of_node(&nand->mtd, np);
}

/**
 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
 * @nand: NAND device
 *
 * Return: the DT node attached to @nand.
 */
static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
{
	return mtd_get_of_node(&nand->mtd);
}

/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation.
 *
 * Return: the offset within the NAND page pointed to by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
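
/*
 * Worked example (illustrative only): with a 2KiB page size, 64 pages per
 * eraseblock, 1024 eraseblocks per LUN and 1 LUN per target, offset
 * 0x500400 (5243904) resolves to:
 *
 *	pageoffs        = 5243904 % 2048 = 1024
 *	pos->page       = 2560 % 64 = 0
 *	pos->eraseblock = 40, pos->lun = 0, pos->target = 0
 *
 * i.e. byte 1024 of the first page of eraseblock 40 on LUN 0 of target 0.
 */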

/**
 * nanddev_pos_cmp() - Compare two NAND positions
 * @a: First NAND position
 * @b: Second NAND position
 *
 * Compares two NAND positions.
 *
 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
 */
static inline int nanddev_pos_cmp(const struct nand_pos *a,
				  const struct nand_pos *b)
{
	if (a->target != b->target)
		return a->target < b->target ? -1 : 1;

	if (a->lun != b->lun)
		return a->lun < b->lun ? -1 : 1;

	if (a->eraseblock != b->eraseblock)
		return a->eraseblock < b->eraseblock ? -1 : 1;

	if (a->page != b->page)
		return a->page < b->page ? -1 : 1;

	return 0;
}

/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page; if one wants to point to a specific offset within this page,
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	return (loff_t)npages * nand->memorg.pagesize;
}

/**
 * nanddev_pos_to_row() - Extract a row address from a NAND position
 * @nand: NAND device
 * @pos: the position to convert
 *
 * Converts a NAND position into a row address that can then be passed to the
 * device.
 *
 * Return: the row address extracted from @pos.
 */
static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
					      const struct nand_pos *pos)
{
	return (pos->lun << nand->rowconv.lun_addr_shift) |
	       (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
	       pos->page;
}
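
/*
 * Example (illustrative only): with 64 pages per eraseblock the eraseblock
 * identifier starts at bit 6 (eraseblock_addr_shift == 6), so eraseblock 40,
 * page 3 on LUN 0 yields a row address of (40 << 6) | 3 = 0xa03.
 */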

/**
 * nanddev_pos_next_target() - Move a position to the next target/die
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next target/die. Useful when you
 * want to iterate over all targets/dies of a NAND device.
 */
static inline void nanddev_pos_next_target(struct nand_device *nand,
					   struct nand_pos *pos)
{
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
	pos->lun = 0;
	pos->target++;
}

/**
 * nanddev_pos_next_lun() - Move a position to the next LUN
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next LUN. Useful when you want to
 * iterate over all LUNs of a NAND device.
 */
static inline void nanddev_pos_next_lun(struct nand_device *nand,
					struct nand_pos *pos)
{
	if (pos->lun >= nand->memorg.luns_per_target - 1)
		return nanddev_pos_next_target(nand, pos);

	pos->lun++;
	pos->page = 0;
	pos->plane = 0;
	pos->eraseblock = 0;
}

/**
 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next eraseblock. Useful when you
 * want to iterate over all eraseblocks of a NAND device.
 */
static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
					       struct nand_pos *pos)
{
	if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
		return nanddev_pos_next_lun(nand, pos);

	pos->eraseblock++;
	pos->page = 0;
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
}

/**
 * nanddev_pos_next_page() - Move a position to the next page
 * @nand: NAND device
 * @pos: the position to update
 *
 * Updates @pos to point to the start of the next page. Useful when you want to
 * iterate over all pages of a NAND device.
 */
static inline void nanddev_pos_next_page(struct nand_device *nand,
					 struct nand_pos *pos)
{
	if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
		return nanddev_pos_next_eraseblock(nand, pos);

	pos->page++;
}

/**
 * nanddev_io_page_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @reqtype: request type (%NAND_PAGE_READ or %NAND_PAGE_WRITE)
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer for page jumps.
 */
static inline void nanddev_io_page_iter_init(struct nand_device *nand,
					     enum nand_page_io_req_type reqtype,
					     loff_t offs, struct mtd_oob_ops *req,
					     struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.type = reqtype;
	iter->req.mode = req->mode;
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	iter->req.databuf.in = req->datbuf;
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
	iter->req.continuous = false;
}

/**
 * nanddev_io_block_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @reqtype: request type (%NAND_PAGE_READ or %NAND_PAGE_WRITE)
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer for block jumps (no OOB).
 *
 * In practice only reads may leverage this iterator.
 */
static inline void nanddev_io_block_iter_init(struct nand_device *nand,
					      enum nand_page_io_req_type reqtype,
					      loff_t offs, struct mtd_oob_ops *req,
					      struct nand_io_iter *iter)
{
	unsigned int offs_in_eb;

	iter->req.type = reqtype;
	iter->req.mode = req->mode;
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = 0;
	iter->oobbytes_per_page = 0;
	iter->dataleft = req->len;
	iter->oobleft = 0;
	iter->req.databuf.in = req->datbuf;
	offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
	iter->req.datalen = min_t(unsigned int,
				  nanddev_eraseblock_size(nand) - offs_in_eb,
				  iter->dataleft);
	iter->req.oobbuf.in = NULL;
	iter->req.ooblen = 0;
	iter->req.continuous = true;
}

/**
 * nanddev_io_iter_next_page - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next page.
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}

/**
 * nanddev_io_iter_next_block - Move to the next block
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next block.
 * No OOB handling available.
 */
static inline void nanddev_io_iter_next_block(struct nand_device *nand,
					      struct nand_io_iter *iter)
{
	nanddev_pos_next_eraseblock(nand, &iter->req.pos);
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->req.dataoffs = 0;
	iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
				  iter->dataleft);
}

/**
 * nanddev_io_iter_end - Tell whether the iteration should end
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Check whether @iter has reached the end of the NAND portion it was asked to
 * iterate on or not.
 *
 * Return: true if @iter has reached the end of the iteration request, false
 *	   otherwise.
 */
static inline bool nanddev_io_iter_end(struct nand_device *nand,
				       const struct nand_io_iter *iter)
{
	if (iter->dataleft || iter->oobleft)
		return false;

	return true;
}

/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @type: request type (%NAND_PAGE_READ or %NAND_PAGE_WRITE)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over pages that are contained in an MTD request.
 */
#define nanddev_io_for_each_page(nand, type, start, req, iter)		\
	for (nanddev_io_page_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_page(nand, iter))

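/*
 * Example (sketch, error handling condensed): a specialized layer can build
 * its MTD ->_read_oob() hook on top of this iterator. The mynand_read_page()
 * helper below is hypothetical:
 *
 *	struct nand_device *nand = mtd_to_nanddev(mtd);
 *	struct nand_io_iter iter;
 *	int ret;
 *
 *	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 *		ret = mynand_read_page(nand, &iter.req);
 *		if (ret)
 *			return ret;
 *	}
 *
 *	return 0;
 */
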
/**
 * nanddev_io_for_each_block - Iterate over all NAND pages contained in an MTD
 *			       I/O request, one block at a time
 * @nand: NAND device
 * @type: request type (%NAND_PAGE_READ or %NAND_PAGE_WRITE)
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterating over blocks that are contained in an MTD request.
 */
#define nanddev_io_for_each_block(nand, type, start, req, iter)		\
	for (nanddev_io_block_iter_init(nand, type, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);				\
	     nanddev_io_iter_next_block(nand, iter))

bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);

/* ECC related functions */
int nanddev_ecc_engine_init(struct nand_device *nand);
void nanddev_ecc_engine_cleanup(struct nand_device *nand);

static inline void *nand_to_ecc_ctx(struct nand_device *nand)
{
	return nand->ecc.ctx.priv;
}

/* BBT related functions */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};

int nanddev_bbt_init(struct nand_device *nand);
void nanddev_bbt_cleanup(struct nand_device *nand);
int nanddev_bbt_update(struct nand_device *nand);
int nanddev_bbt_get_block_status(const struct nand_device *nand,
				 unsigned int entry);
int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
				 enum nand_bbt_block_status status);
int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);

/**
 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
 * @nand: NAND device
 * @pos: the NAND position we want to get BBT entry for
 *
 * Return the BBT entry used to store information about the eraseblock pointed
 * to by @pos.
 *
 * Return: the BBT entry storing information about the eraseblock pointed to
 *	   by @pos.
 */
static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
						    const struct nand_pos *pos)
{
	return pos->eraseblock +
	       ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
		nand->memorg.eraseblocks_per_lun);
}

/**
 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
 * @nand: NAND device
 *
 * Return: true if the BBT has been initialized, false otherwise.
 */
static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
	return !!nand->bbt.cache;
}

/* MTD -> NAND helper functions. */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);

#endif /* __LINUX_MTD_NAND_H */