/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2017 - Free Electrons
 *
 * Authors:
 *	Boris Brezillon <boris.brezillon@free-electrons.com>
 *	Peter Pan <peterpandong@micron.com>
 */

#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H

#include <linux/mtd/mtd.h>
  12. /**
  13. * struct nand_memory_organization - Memory organization structure
  14. * @bits_per_cell: number of bits per NAND cell
  15. * @pagesize: page size
  16. * @oobsize: OOB area size
  17. * @pages_per_eraseblock: number of pages per eraseblock
  18. * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
  19. * @planes_per_lun: number of planes per LUN
  20. * @luns_per_target: number of LUN per target (target is a synonym for die)
  21. * @ntargets: total number of targets exposed by the NAND device
  22. */
  23. struct nand_memory_organization {
  24. unsigned int bits_per_cell;
  25. unsigned int pagesize;
  26. unsigned int oobsize;
  27. unsigned int pages_per_eraseblock;
  28. unsigned int eraseblocks_per_lun;
  29. unsigned int planes_per_lun;
  30. unsigned int luns_per_target;
  31. unsigned int ntargets;
  32. };
  33. #define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
  34. { \
  35. .bits_per_cell = (bpc), \
  36. .pagesize = (ps), \
  37. .oobsize = (os), \
  38. .pages_per_eraseblock = (ppe), \
  39. .eraseblocks_per_lun = (epl), \
  40. .planes_per_lun = (ppl), \
  41. .luns_per_target = (lpt), \
  42. .ntargets = (nt), \
  43. }
  44. /**
  45. * struct nand_row_converter - Information needed to convert an absolute offset
  46. * into a row address
  47. * @lun_addr_shift: position of the LUN identifier in the row address
  48. * @eraseblock_addr_shift: position of the eraseblock identifier in the row
  49. * address
  50. */
  51. struct nand_row_converter {
  52. unsigned int lun_addr_shift;
  53. unsigned int eraseblock_addr_shift;
  54. };
  55. /**
  56. * struct nand_pos - NAND position object
  57. * @target: the NAND target/die
  58. * @lun: the LUN identifier
  59. * @plane: the plane within the LUN
  60. * @eraseblock: the eraseblock within the LUN
  61. * @page: the page within the LUN
  62. *
  63. * These information are usually used by specific sub-layers to select the
  64. * appropriate target/die and generate a row address to pass to the device.
  65. */
  66. struct nand_pos {
  67. unsigned int target;
  68. unsigned int lun;
  69. unsigned int plane;
  70. unsigned int eraseblock;
  71. unsigned int page;
  72. };
  73. /**
  74. * struct nand_page_io_req - NAND I/O request object
  75. * @pos: the position this I/O request is targeting
  76. * @dataoffs: the offset within the page
  77. * @datalen: number of data bytes to read from/write to this page
  78. * @databuf: buffer to store data in or get data from
  79. * @ooboffs: the OOB offset within the page
  80. * @ooblen: the number of OOB bytes to read from/write to this page
  81. * @oobbuf: buffer to store OOB data in or get OOB data from
  82. * @mode: one of the %MTD_OPS_XXX mode
  83. *
  84. * This object is used to pass per-page I/O requests to NAND sub-layers. This
  85. * way all useful information are already formatted in a useful way and
  86. * specific NAND layers can focus on translating these information into
  87. * specific commands/operations.
  88. */
  89. struct nand_page_io_req {
  90. struct nand_pos pos;
  91. unsigned int dataoffs;
  92. unsigned int datalen;
  93. union {
  94. const void *out;
  95. void *in;
  96. } databuf;
  97. unsigned int ooboffs;
  98. unsigned int ooblen;
  99. union {
  100. const void *out;
  101. void *in;
  102. } oobbuf;
  103. int mode;
  104. };
  105. /**
  106. * struct nand_ecc_req - NAND ECC requirements
  107. * @strength: ECC strength
  108. * @step_size: ECC step/block size
  109. */
  110. struct nand_ecc_req {
  111. unsigned int strength;
  112. unsigned int step_size;
  113. };
  114. #define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
  115. /**
  116. * struct nand_bbt - bad block table object
  117. * @cache: in memory BBT cache
  118. */
  119. struct nand_bbt {
  120. unsigned long *cache;
  121. };
  122. struct nand_device;
  123. /**
  124. * struct nand_ops - NAND operations
  125. * @erase: erase a specific block. No need to check if the block is bad before
  126. * erasing, this has been taken care of by the generic NAND layer
  127. * @markbad: mark a specific block bad. No need to check if the block is
  128. * already marked bad, this has been taken care of by the generic
  129. * NAND layer. This method should just write the BBM (Bad Block
  130. * Marker) so that future call to struct_nand_ops->isbad() return
  131. * true
  132. * @isbad: check whether a block is bad or not. This method should just read
  133. * the BBM and return whether the block is bad or not based on what it
  134. * reads
  135. *
  136. * These are all low level operations that should be implemented by specialized
  137. * NAND layers (SPI NAND, raw NAND, ...).
  138. */
  139. struct nand_ops {
  140. int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
  141. int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
  142. bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
  143. };
  144. /**
  145. * struct nand_device - NAND device
  146. * @mtd: MTD instance attached to the NAND device
  147. * @memorg: memory layout
  148. * @eccreq: ECC requirements
  149. * @rowconv: position to row address converter
  150. * @bbt: bad block table info
  151. * @ops: NAND operations attached to the NAND device
  152. *
  153. * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
  154. * should declare their own NAND object embedding a nand_device struct (that's
  155. * how inheritance is done).
  156. * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
  157. * at device detection time to reflect the NAND device
  158. * capabilities/requirements. Once this is done nanddev_init() can be called.
  159. * It will take care of converting NAND information into MTD ones, which means
  160. * the specialized NAND layers should never manually tweak
  161. * struct_nand_device->mtd except for the ->_read/write() hooks.
  162. */
  163. struct nand_device {
  164. struct mtd_info *mtd;
  165. struct nand_memory_organization memorg;
  166. struct nand_ecc_req eccreq;
  167. struct nand_row_converter rowconv;
  168. struct nand_bbt bbt;
  169. const struct nand_ops *ops;
  170. };
  171. /**
  172. * struct nand_io_iter - NAND I/O iterator
  173. * @req: current I/O request
  174. * @oobbytes_per_page: maximum number of OOB bytes per page
  175. * @dataleft: remaining number of data bytes to read/write
  176. * @oobleft: remaining number of OOB bytes to read/write
  177. *
  178. * Can be used by specialized NAND layers to iterate over all pages covered
  179. * by an MTD I/O request, which should greatly simplifies the boiler-plate
  180. * code needed to read/write data from/to a NAND device.
  181. */
  182. struct nand_io_iter {
  183. struct nand_page_io_req req;
  184. unsigned int oobbytes_per_page;
  185. unsigned int dataleft;
  186. unsigned int oobleft;
  187. };
  188. /**
  189. * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
  190. * @mtd: MTD instance
  191. *
  192. * Return: the NAND device embedding @mtd.
  193. */
  194. static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
  195. {
  196. return mtd->priv;
  197. }
  198. /**
  199. * nanddev_to_mtd() - Get the MTD device attached to a NAND device
  200. * @nand: NAND device
  201. *
  202. * Return: the MTD device embedded in @nand.
  203. */
  204. static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
  205. {
  206. return nand->mtd;
  207. }
  208. /*
  209. * nanddev_bits_per_cell() - Get the number of bits per cell
  210. * @nand: NAND device
  211. *
  212. * Return: the number of bits per cell.
  213. */
  214. static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
  215. {
  216. return nand->memorg.bits_per_cell;
  217. }
  218. /**
  219. * nanddev_page_size() - Get NAND page size
  220. * @nand: NAND device
  221. *
  222. * Return: the page size.
  223. */
  224. static inline size_t nanddev_page_size(const struct nand_device *nand)
  225. {
  226. return nand->memorg.pagesize;
  227. }
  228. /**
  229. * nanddev_per_page_oobsize() - Get NAND OOB size
  230. * @nand: NAND device
  231. *
  232. * Return: the OOB size.
  233. */
  234. static inline unsigned int
  235. nanddev_per_page_oobsize(const struct nand_device *nand)
  236. {
  237. return nand->memorg.oobsize;
  238. }
  239. /**
  240. * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
  241. * @nand: NAND device
  242. *
  243. * Return: the number of pages per eraseblock.
  244. */
  245. static inline unsigned int
  246. nanddev_pages_per_eraseblock(const struct nand_device *nand)
  247. {
  248. return nand->memorg.pages_per_eraseblock;
  249. }
  250. /**
  251. * nanddev_per_page_oobsize() - Get NAND erase block size
  252. * @nand: NAND device
  253. *
  254. * Return: the eraseblock size.
  255. */
  256. static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
  257. {
  258. return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
  259. }
  260. /**
  261. * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
  262. * @nand: NAND device
  263. *
  264. * Return: the number of eraseblocks per LUN.
  265. */
  266. static inline unsigned int
  267. nanddev_eraseblocks_per_lun(const struct nand_device *nand)
  268. {
  269. return nand->memorg.eraseblocks_per_lun;
  270. }
  271. /**
  272. * nanddev_target_size() - Get the total size provided by a single target/die
  273. * @nand: NAND device
  274. *
  275. * Return: the total size exposed by a single target/die in bytes.
  276. */
  277. static inline u64 nanddev_target_size(const struct nand_device *nand)
  278. {
  279. return (u64)nand->memorg.luns_per_target *
  280. nand->memorg.eraseblocks_per_lun *
  281. nand->memorg.pages_per_eraseblock *
  282. nand->memorg.pagesize;
  283. }
  284. /**
  285. * nanddev_ntarget() - Get the total of targets
  286. * @nand: NAND device
  287. *
  288. * Return: the number of targets/dies exposed by @nand.
  289. */
  290. static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
  291. {
  292. return nand->memorg.ntargets;
  293. }
  294. /**
  295. * nanddev_neraseblocks() - Get the total number of erasablocks
  296. * @nand: NAND device
  297. *
  298. * Return: the total number of eraseblocks exposed by @nand.
  299. */
  300. static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
  301. {
  302. return (u64)nand->memorg.luns_per_target *
  303. nand->memorg.eraseblocks_per_lun *
  304. nand->memorg.pages_per_eraseblock;
  305. }
  306. /**
  307. * nanddev_size() - Get NAND size
  308. * @nand: NAND device
  309. *
  310. * Return: the total size (in bytes) exposed by @nand.
  311. */
  312. static inline u64 nanddev_size(const struct nand_device *nand)
  313. {
  314. return nanddev_target_size(nand) * nanddev_ntargets(nand);
  315. }
  316. /**
  317. * nanddev_get_memorg() - Extract memory organization info from a NAND device
  318. * @nand: NAND device
  319. *
  320. * This can be used by the upper layer to fill the memorg info before calling
  321. * nanddev_init().
  322. *
  323. * Return: the memorg object embedded in the NAND device.
  324. */
  325. static inline struct nand_memory_organization *
  326. nanddev_get_memorg(struct nand_device *nand)
  327. {
  328. return &nand->memorg;
  329. }
  330. int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
  331. struct module *owner);
  332. void nanddev_cleanup(struct nand_device *nand);
  333. /**
  334. * nanddev_register() - Register a NAND device
  335. * @nand: NAND device
  336. *
  337. * Register a NAND device.
  338. * This function is just a wrapper around mtd_device_register()
  339. * registering the MTD device embedded in @nand.
  340. *
  341. * Return: 0 in case of success, a negative error code otherwise.
  342. */
  343. static inline int nanddev_register(struct nand_device *nand)
  344. {
  345. return mtd_device_register(nand->mtd, NULL, 0);
  346. }
  347. /**
  348. * nanddev_unregister() - Unregister a NAND device
  349. * @nand: NAND device
  350. *
  351. * Unregister a NAND device.
  352. * This function is just a wrapper around mtd_device_unregister()
  353. * unregistering the MTD device embedded in @nand.
  354. *
  355. * Return: 0 in case of success, a negative error code otherwise.
  356. */
  357. static inline int nanddev_unregister(struct nand_device *nand)
  358. {
  359. return mtd_device_unregister(nand->mtd);
  360. }
  361. /**
  362. * nanddev_set_of_node() - Attach a DT node to a NAND device
  363. * @nand: NAND device
  364. * @np: DT node
  365. *
  366. * Attach a DT node to a NAND device.
  367. */
  368. static inline void nanddev_set_of_node(struct nand_device *nand,
  369. const struct device_node *np)
  370. {
  371. mtd_set_of_node(nand->mtd, np);
  372. }
  373. /**
  374. * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
  375. * @nand: NAND device
  376. *
  377. * Return: the DT node attached to @nand.
  378. */
  379. static inline const struct device_node *nanddev_get_of_node(struct nand_device *nand)
  380. {
  381. return mtd_get_of_node(nand->mtd);
  382. }
  383. /**
  384. * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
  385. * @nand: NAND device
  386. * @offs: absolute NAND offset (usually passed by the MTD layer)
  387. * @pos: a NAND position object to fill in
  388. *
  389. * Converts @offs into a nand_pos representation.
  390. *
  391. * Return: the offset within the NAND page pointed by @pos.
  392. */
  393. static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
  394. loff_t offs,
  395. struct nand_pos *pos)
  396. {
  397. unsigned int pageoffs;
  398. u64 tmp = offs;
  399. pageoffs = do_div(tmp, nand->memorg.pagesize);
  400. pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
  401. pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
  402. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  403. pos->lun = do_div(tmp, nand->memorg.luns_per_target);
  404. pos->target = tmp;
  405. return pageoffs;
  406. }
  407. /**
  408. * nanddev_pos_cmp() - Compare two NAND positions
  409. * @a: First NAND position
  410. * @b: Second NAND position
  411. *
  412. * Compares two NAND positions.
  413. *
  414. * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
  415. */
  416. static inline int nanddev_pos_cmp(const struct nand_pos *a,
  417. const struct nand_pos *b)
  418. {
  419. if (a->target != b->target)
  420. return a->target < b->target ? -1 : 1;
  421. if (a->lun != b->lun)
  422. return a->lun < b->lun ? -1 : 1;
  423. if (a->eraseblock != b->eraseblock)
  424. return a->eraseblock < b->eraseblock ? -1 : 1;
  425. if (a->page != b->page)
  426. return a->page < b->page ? -1 : 1;
  427. return 0;
  428. }
  429. /**
  430. * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
  431. * @nand: NAND device
  432. * @pos: the NAND position to convert
  433. *
  434. * Converts @pos NAND position into an absolute offset.
  435. *
  436. * Return: the absolute offset. Note that @pos points to the beginning of a
  437. * page, if one wants to point to a specific offset within this page
  438. * the returned offset has to be adjusted manually.
  439. */
  440. static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
  441. const struct nand_pos *pos)
  442. {
  443. unsigned int npages;
  444. npages = pos->page +
  445. ((pos->eraseblock +
  446. (pos->lun +
  447. (pos->target * nand->memorg.luns_per_target)) *
  448. nand->memorg.eraseblocks_per_lun) *
  449. nand->memorg.pages_per_eraseblock);
  450. return (loff_t)npages * nand->memorg.pagesize;
  451. }
  452. /**
  453. * nanddev_pos_to_row() - Extract a row address from a NAND position
  454. * @nand: NAND device
  455. * @pos: the position to convert
  456. *
  457. * Converts a NAND position into a row address that can then be passed to the
  458. * device.
  459. *
  460. * Return: the row address extracted from @pos.
  461. */
  462. static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
  463. const struct nand_pos *pos)
  464. {
  465. return (pos->lun << nand->rowconv.lun_addr_shift) |
  466. (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
  467. pos->page;
  468. }
  469. /**
  470. * nanddev_pos_next_target() - Move a position to the next target/die
  471. * @nand: NAND device
  472. * @pos: the position to update
  473. *
  474. * Updates @pos to point to the start of the next target/die. Useful when you
  475. * want to iterate over all targets/dies of a NAND device.
  476. */
  477. static inline void nanddev_pos_next_target(struct nand_device *nand,
  478. struct nand_pos *pos)
  479. {
  480. pos->page = 0;
  481. pos->plane = 0;
  482. pos->eraseblock = 0;
  483. pos->lun = 0;
  484. pos->target++;
  485. }
  486. /**
  487. * nanddev_pos_next_lun() - Move a position to the next LUN
  488. * @nand: NAND device
  489. * @pos: the position to update
  490. *
  491. * Updates @pos to point to the start of the next LUN. Useful when you want to
  492. * iterate over all LUNs of a NAND device.
  493. */
  494. static inline void nanddev_pos_next_lun(struct nand_device *nand,
  495. struct nand_pos *pos)
  496. {
  497. if (pos->lun >= nand->memorg.luns_per_target - 1)
  498. return nanddev_pos_next_target(nand, pos);
  499. pos->lun++;
  500. pos->page = 0;
  501. pos->plane = 0;
  502. pos->eraseblock = 0;
  503. }
  504. /**
  505. * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
  506. * @nand: NAND device
  507. * @pos: the position to update
  508. *
  509. * Updates @pos to point to the start of the next eraseblock. Useful when you
  510. * want to iterate over all eraseblocks of a NAND device.
  511. */
  512. static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
  513. struct nand_pos *pos)
  514. {
  515. if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
  516. return nanddev_pos_next_lun(nand, pos);
  517. pos->eraseblock++;
  518. pos->page = 0;
  519. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  520. }
  521. /**
  522. * nanddev_pos_next_eraseblock() - Move a position to the next page
  523. * @nand: NAND device
  524. * @pos: the position to update
  525. *
  526. * Updates @pos to point to the start of the next page. Useful when you want to
  527. * iterate over all pages of a NAND device.
  528. */
  529. static inline void nanddev_pos_next_page(struct nand_device *nand,
  530. struct nand_pos *pos)
  531. {
  532. if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
  533. return nanddev_pos_next_eraseblock(nand, pos);
  534. pos->page++;
  535. }
  536. /**
  537. * nand_io_iter_init - Initialize a NAND I/O iterator
  538. * @nand: NAND device
  539. * @offs: absolute offset
  540. * @req: MTD request
  541. * @iter: NAND I/O iterator
  542. *
  543. * Initializes a NAND iterator based on the information passed by the MTD
  544. * layer.
  545. */
  546. static inline void nanddev_io_iter_init(struct nand_device *nand,
  547. loff_t offs, struct mtd_oob_ops *req,
  548. struct nand_io_iter *iter)
  549. {
  550. struct mtd_info *mtd = nanddev_to_mtd(nand);
  551. iter->req.mode = req->mode;
  552. iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
  553. iter->req.ooboffs = req->ooboffs;
  554. iter->oobbytes_per_page = mtd_oobavail(mtd, req);
  555. iter->dataleft = req->len;
  556. iter->oobleft = req->ooblen;
  557. iter->req.databuf.in = req->datbuf;
  558. iter->req.datalen = min_t(unsigned int,
  559. nand->memorg.pagesize - iter->req.dataoffs,
  560. iter->dataleft);
  561. iter->req.oobbuf.in = req->oobbuf;
  562. iter->req.ooblen = min_t(unsigned int,
  563. iter->oobbytes_per_page - iter->req.ooboffs,
  564. iter->oobleft);
  565. }
  566. /**
  567. * nand_io_iter_next_page - Move to the next page
  568. * @nand: NAND device
  569. * @iter: NAND I/O iterator
  570. *
  571. * Updates the @iter to point to the next page.
  572. */
  573. static inline void nanddev_io_iter_next_page(struct nand_device *nand,
  574. struct nand_io_iter *iter)
  575. {
  576. nanddev_pos_next_page(nand, &iter->req.pos);
  577. iter->dataleft -= iter->req.datalen;
  578. iter->req.databuf.in += iter->req.datalen;
  579. iter->oobleft -= iter->req.ooblen;
  580. iter->req.oobbuf.in += iter->req.ooblen;
  581. iter->req.dataoffs = 0;
  582. iter->req.ooboffs = 0;
  583. iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
  584. iter->dataleft);
  585. iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
  586. iter->oobleft);
  587. }
  588. /**
  589. * nand_io_iter_end - Should end iteration or not
  590. * @nand: NAND device
  591. * @iter: NAND I/O iterator
  592. *
  593. * Check whether @iter has reached the end of the NAND portion it was asked to
  594. * iterate on or not.
  595. *
  596. * Return: true if @iter has reached the end of the iteration request, false
  597. * otherwise.
  598. */
  599. static inline bool nanddev_io_iter_end(struct nand_device *nand,
  600. const struct nand_io_iter *iter)
  601. {
  602. if (iter->dataleft || iter->oobleft)
  603. return false;
  604. return true;
  605. }
  606. /**
  607. * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O
  608. * request
  609. * @nand: NAND device
  610. * @start: start address to read/write from
  611. * @req: MTD I/O request
  612. * @iter: NAND I/O iterator
  613. *
  614. * Should be used for iterate over pages that are contained in an MTD request.
  615. */
  616. #define nanddev_io_for_each_page(nand, start, req, iter) \
  617. for (nanddev_io_iter_init(nand, start, req, iter); \
  618. !nanddev_io_iter_end(nand, iter); \
  619. nanddev_io_iter_next_page(nand, iter))
  620. bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
  621. bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
  622. int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
  623. int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
  624. /* BBT related functions */
  625. enum nand_bbt_block_status {
  626. NAND_BBT_BLOCK_STATUS_UNKNOWN,
  627. NAND_BBT_BLOCK_GOOD,
  628. NAND_BBT_BLOCK_WORN,
  629. NAND_BBT_BLOCK_RESERVED,
  630. NAND_BBT_BLOCK_FACTORY_BAD,
  631. NAND_BBT_BLOCK_NUM_STATUS,
  632. };
  633. int nanddev_bbt_init(struct nand_device *nand);
  634. void nanddev_bbt_cleanup(struct nand_device *nand);
  635. int nanddev_bbt_update(struct nand_device *nand);
  636. int nanddev_bbt_get_block_status(const struct nand_device *nand,
  637. unsigned int entry);
  638. int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
  639. enum nand_bbt_block_status status);
  640. int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
  641. /**
  642. * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
  643. * @nand: NAND device
  644. * @pos: the NAND position we want to get BBT entry for
  645. *
  646. * Return the BBT entry used to store information about the eraseblock pointed
  647. * by @pos.
  648. *
  649. * Return: the BBT entry storing information about eraseblock pointed by @pos.
  650. */
  651. static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
  652. const struct nand_pos *pos)
  653. {
  654. return pos->eraseblock +
  655. ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
  656. nand->memorg.eraseblocks_per_lun);
  657. }
  658. /**
  659. * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
  660. * @nand: NAND device
  661. *
  662. * Return: true if the BBT has been initialized, false otherwise.
  663. */
  664. static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
  665. {
  666. return !!nand->bbt.cache;
  667. }
  668. /* MTD -> NAND helper functions. */
  669. int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
  670. #endif /* __LINUX_MTD_NAND_H */