mtdcore.c 44 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Core registration and callback routines for MTD
  4. * drivers and users.
  5. *
  6. * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
  7. * Copyright © 2006 Red Hat UK Limited
  8. *
  9. */
  10. #ifndef __UBOOT__
  11. #include <linux/module.h>
  12. #include <linux/kernel.h>
  13. #include <linux/ptrace.h>
  14. #include <linux/seq_file.h>
  15. #include <linux/string.h>
  16. #include <linux/timer.h>
  17. #include <linux/major.h>
  18. #include <linux/fs.h>
  19. #include <linux/err.h>
  20. #include <linux/ioctl.h>
  21. #include <linux/init.h>
  22. #include <linux/proc_fs.h>
  23. #include <linux/idr.h>
  24. #include <linux/backing-dev.h>
  25. #include <linux/gfp.h>
  26. #include <linux/slab.h>
  27. #else
  28. #include <linux/err.h>
  29. #include <ubi_uboot.h>
  30. #endif
  31. #include <linux/log2.h>
  32. #include <linux/mtd/mtd.h>
  33. #include <linux/mtd/partitions.h>
  34. #include "mtdcore.h"
#ifndef __UBOOT__
/*
 * backing device capabilities for non-mappable devices (such as NAND flash)
 * - permits private mappings, copies are taken of the data
 */
static struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities = BDI_CAP_MAP_COPY,
};

/*
 * backing device capabilities for R/O mappable devices (such as ROM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * backing device capabilities for writable mappable devices (such as RAM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			 BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			 BDI_CAP_WRITE_MAP),
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

/* /sys/class/mtd; suspend/resume are forwarded to the per-device hooks. */
static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};
#else
/* U-Boot build: no driver model class, just a flat device table. */
struct mtd_info *mtd_table[MAX_MTD_DEVICES];

/*
 * Minimal stand-in for the Linux IDR (ID-to-pointer) allocator, used to
 * hand out MTD device numbers: a fixed array of MAX_IDR_ID slots.
 */
#define MAX_IDR_ID 64

struct idr_layer {
	int used;	/* non-zero when this slot holds a valid pointer */
	void *ptr;	/* the value registered under this ID */
};

struct idr {
	struct idr_layer id[MAX_IDR_ID];
};

#define DEFINE_IDR(name) struct idr name;
  81. void idr_remove(struct idr *idp, int id)
  82. {
  83. if (idp->id[id].used)
  84. idp->id[id].used = 0;
  85. return;
  86. }
  87. void *idr_find(struct idr *idp, int id)
  88. {
  89. if (idp->id[id].used)
  90. return idp->id[id].ptr;
  91. return NULL;
  92. }
  93. void *idr_get_next(struct idr *idp, int *next)
  94. {
  95. void *ret;
  96. int id = *next;
  97. ret = idr_find(idp, id);
  98. if (ret) {
  99. id ++;
  100. if (!idp->id[id].used)
  101. id = 0;
  102. *next = id;
  103. } else {
  104. *next = 0;
  105. }
  106. return ret;
  107. }
  108. int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
  109. {
  110. struct idr_layer *idl;
  111. int i = 0;
  112. while (i < MAX_IDR_ID) {
  113. idl = &idp->id[i];
  114. if (idl->used == 0) {
  115. idl->used = 1;
  116. idl->ptr = ptr;
  117. return i;
  118. }
  119. i++;
  120. }
  121. return -ENOSPC;
  122. }
#endif

/* Map of MTD device numbers to struct mtd_info, used by both builds. */
static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);
  129. struct mtd_info *__mtd_next_device(int i)
  130. {
  131. return idr_get_next(&mtd_idr, &i);
  132. }
  133. EXPORT_SYMBOL_GPL(__mtd_next_device);
#ifndef __UBOOT__
/* List of mtd_notifier callbacks added via register_mtd_user(). */
static LIST_HEAD(mtd_notifiers);

/* Char device number for mtd%d; the mtd%dro node uses MTD_DEVT(index) + 1. */
#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
  140. static void mtd_release(struct device *dev)
  141. {
  142. struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
  143. dev_t index = MTD_DEVT(mtd->index);
  144. /* remove /dev/mtdXro node if needed */
  145. if (index)
  146. device_destroy(&mtd_class, index + 1);
  147. }
  148. static int mtd_cls_suspend(struct device *dev, pm_message_t state)
  149. {
  150. struct mtd_info *mtd = dev_get_drvdata(dev);
  151. return mtd ? mtd_suspend(mtd) : 0;
  152. }
  153. static int mtd_cls_resume(struct device *dev)
  154. {
  155. struct mtd_info *mtd = dev_get_drvdata(dev);
  156. if (mtd)
  157. mtd_resume(mtd);
  158. return 0;
  159. }
  160. static ssize_t mtd_type_show(struct device *dev,
  161. struct device_attribute *attr, char *buf)
  162. {
  163. struct mtd_info *mtd = dev_get_drvdata(dev);
  164. char *type;
  165. switch (mtd->type) {
  166. case MTD_ABSENT:
  167. type = "absent";
  168. break;
  169. case MTD_RAM:
  170. type = "ram";
  171. break;
  172. case MTD_ROM:
  173. type = "rom";
  174. break;
  175. case MTD_NORFLASH:
  176. type = "nor";
  177. break;
  178. case MTD_NANDFLASH:
  179. type = "nand";
  180. break;
  181. case MTD_DATAFLASH:
  182. type = "dataflash";
  183. break;
  184. case MTD_UBIVOLUME:
  185. type = "ubi";
  186. break;
  187. case MTD_MLCNANDFLASH:
  188. type = "mlc-nand";
  189. break;
  190. default:
  191. type = "unknown";
  192. }
  193. return snprintf(buf, PAGE_SIZE, "%s\n", type);
  194. }
  195. static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
  196. static ssize_t mtd_flags_show(struct device *dev,
  197. struct device_attribute *attr, char *buf)
  198. {
  199. struct mtd_info *mtd = dev_get_drvdata(dev);
  200. return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
  201. }
  202. static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
  203. static ssize_t mtd_size_show(struct device *dev,
  204. struct device_attribute *attr, char *buf)
  205. {
  206. struct mtd_info *mtd = dev_get_drvdata(dev);
  207. return snprintf(buf, PAGE_SIZE, "%llu\n",
  208. (unsigned long long)mtd->size);
  209. }
  210. static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
  211. static ssize_t mtd_erasesize_show(struct device *dev,
  212. struct device_attribute *attr, char *buf)
  213. {
  214. struct mtd_info *mtd = dev_get_drvdata(dev);
  215. return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
  216. }
  217. static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
  218. static ssize_t mtd_writesize_show(struct device *dev,
  219. struct device_attribute *attr, char *buf)
  220. {
  221. struct mtd_info *mtd = dev_get_drvdata(dev);
  222. return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
  223. }
  224. static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
  225. static ssize_t mtd_subpagesize_show(struct device *dev,
  226. struct device_attribute *attr, char *buf)
  227. {
  228. struct mtd_info *mtd = dev_get_drvdata(dev);
  229. unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
  230. return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
  231. }
  232. static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
  233. static ssize_t mtd_oobsize_show(struct device *dev,
  234. struct device_attribute *attr, char *buf)
  235. {
  236. struct mtd_info *mtd = dev_get_drvdata(dev);
  237. return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
  238. }
  239. static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
  240. static ssize_t mtd_numeraseregions_show(struct device *dev,
  241. struct device_attribute *attr, char *buf)
  242. {
  243. struct mtd_info *mtd = dev_get_drvdata(dev);
  244. return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
  245. }
  246. static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
  247. NULL);
  248. static ssize_t mtd_name_show(struct device *dev,
  249. struct device_attribute *attr, char *buf)
  250. {
  251. struct mtd_info *mtd = dev_get_drvdata(dev);
  252. return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
  253. }
  254. static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
  255. static ssize_t mtd_ecc_strength_show(struct device *dev,
  256. struct device_attribute *attr, char *buf)
  257. {
  258. struct mtd_info *mtd = dev_get_drvdata(dev);
  259. return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
  260. }
  261. static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
  262. static ssize_t mtd_bitflip_threshold_show(struct device *dev,
  263. struct device_attribute *attr,
  264. char *buf)
  265. {
  266. struct mtd_info *mtd = dev_get_drvdata(dev);
  267. return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
  268. }
  269. static ssize_t mtd_bitflip_threshold_store(struct device *dev,
  270. struct device_attribute *attr,
  271. const char *buf, size_t count)
  272. {
  273. struct mtd_info *mtd = dev_get_drvdata(dev);
  274. unsigned int bitflip_threshold;
  275. int retval;
  276. retval = kstrtouint(buf, 0, &bitflip_threshold);
  277. if (retval)
  278. return retval;
  279. mtd->bitflip_threshold = bitflip_threshold;
  280. return count;
  281. }
  282. static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
  283. mtd_bitflip_threshold_show,
  284. mtd_bitflip_threshold_store);
  285. static ssize_t mtd_ecc_step_size_show(struct device *dev,
  286. struct device_attribute *attr, char *buf)
  287. {
  288. struct mtd_info *mtd = dev_get_drvdata(dev);
  289. return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
  290. }
  291. static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
/* Attributes exposed under /sys/class/mtd/mtdX/ for every device. */
static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,	/* sentinel */
};
ATTRIBUTE_GROUPS(mtd);

/* Device type shared by all MTD devices; mtd_release() is the
 * driver-core release hook. */
static struct device_type mtd_devtype = {
	.name = "mtd",
	.groups = mtd_groups,
	.release = mtd_release,
};
#endif
/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or 1 on failure, which currently will only happen
 * if there is insufficient memory or a sysfs error.
 */
int add_mtd_device(struct mtd_info *mtd)
{
#ifndef __UBOOT__
	struct mtd_notifier *not;
#endif
	int i, error;

#ifndef __UBOOT__
	/* Pick a default backing_dev_info based on how the device maps. */
	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}
#endif

	/* A zero writesize would break the shift/mask setup below. */
	BUG_ON(mtd->writesize == 0);

	mutex_lock(&mtd_table_mutex);

	/* Allocate the device number; it doubles as the idr index. */
	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	/* Precompute shift/mask forms of erasesize/writesize when they
	 * are powers of two; a shift of 0 means "not a power of two". */
	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		/* -EOPNOTSUPP just means the chip has no unlock op. */
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

#ifndef __UBOOT__
	/* Caller should have set dev.parent to match the
	 * physical device.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	/* Companion read-only node /dev/mtdXro; torn down in mtd_release(). */
	if (MTD_DEVT(i))
		device_create(&mtd_class, mtd->dev.parent,
			      MTD_DEVT(i) + 1,
			      NULL, "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);
#else
	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
#endif

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

#ifndef __UBOOT__
fail_added:
	idr_remove(&mtd_idr, i);
#endif
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}
/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or 1 on failure, which currently will happen
 * if the requested device does not appear to be present in the list.
 * (NOTE(review): the failure paths actually return -ENODEV or -EBUSY,
 * not 1, despite what the text above says.)
 */
int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
#ifndef __UBOOT__
	struct mtd_notifier *not;
#endif

	mutex_lock(&mtd_table_mutex);

	/* The entry registered under this index must be this device. */
	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

#ifndef __UBOOT__
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);
#endif

	if (mtd->usecount) {
		/* Someone still holds a reference: refuse removal. */
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
#ifndef __UBOOT__
		device_unregister(&mtd->dev);
#endif
		idr_remove(&mtd_idr, mtd->index);
		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
  452. #ifndef __UBOOT__
  453. /**
  454. * mtd_device_parse_register - parse partitions and register an MTD device.
  455. *
  456. * @mtd: the MTD device to register
  457. * @types: the list of MTD partition probes to try, see
  458. * 'parse_mtd_partitions()' for more information
  459. * @parser_data: MTD partition parser-specific data
  460. * @parts: fallback partition information to register, if parsing fails;
  461. * only valid if %nr_parts > %0
  462. * @nr_parts: the number of partitions in parts, if zero then the full
  463. * MTD device is registered if no partition info is found
  464. *
  465. * This function aggregates MTD partitions parsing (done by
  466. * 'parse_mtd_partitions()') and MTD device and partitions registering. It
  467. * basically follows the most common pattern found in many MTD drivers:
  468. *
  469. * * It first tries to probe partitions on MTD device @mtd using parsers
  470. * specified in @types (if @types is %NULL, then the default list of parsers
  471. * is used, see 'parse_mtd_partitions()' for more information). If none are
  472. * found this functions tries to fallback to information specified in
  473. * @parts/@nr_parts.
  474. * * If any partitioning info was found, this function registers the found
  475. * partitions.
  476. * * If no partitions were found this function just registers the MTD device
  477. * @mtd and exits.
  478. *
  479. * Returns zero in case of success and a negative error code in case of failure.
  480. */
  481. int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
  482. struct mtd_part_parser_data *parser_data,
  483. const struct mtd_partition *parts,
  484. int nr_parts)
  485. {
  486. int err;
  487. struct mtd_partition *real_parts;
  488. err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
  489. if (err <= 0 && nr_parts && parts) {
  490. real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
  491. GFP_KERNEL);
  492. if (!real_parts)
  493. err = -ENOMEM;
  494. else
  495. err = nr_parts;
  496. }
  497. if (err > 0) {
  498. err = add_mtd_partitions(mtd, real_parts, err);
  499. kfree(real_parts);
  500. } else if (err == 0) {
  501. err = add_mtd_device(mtd);
  502. if (err == 1)
  503. err = -ENODEV;
  504. }
  505. return err;
  506. }
  507. EXPORT_SYMBOL_GPL(mtd_device_parse_register);
  508. /**
  509. * mtd_device_unregister - unregister an existing MTD device.
  510. *
  511. * @master: the MTD device to unregister. This will unregister both the master
  512. * and any partitions if registered.
  513. */
  514. int mtd_device_unregister(struct mtd_info *master)
  515. {
  516. int err;
  517. err = del_mtd_partitions(master);
  518. if (err)
  519. return err;
  520. if (!device_is_registered(&master->dev))
  521. return 0;
  522. return del_mtd_device(master);
  523. }
  524. EXPORT_SYMBOL_GPL(mtd_device_unregister);
/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callbacks function to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	/* Pin this module while a notifier is registered. */
	__module_get(THIS_MODULE);

	/* Replay an 'add' event for every device already present. */
	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	/* Drop the reference taken in register_mtd_user(). */
	module_put(THIS_MODULE);

	/* Replay a 'remove' event for every device still present. */
	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);
#endif
  566. /**
  567. * get_mtd_device - obtain a validated handle for an MTD device
  568. * @mtd: last known address of the required MTD device
  569. * @num: internal device number of the required MTD device
  570. *
  571. * Given a number and NULL address, return the num'th entry in the device
  572. * table, if any. Given an address and num == -1, search the device table
  573. * for a device with that address and return if it's still present. Given
  574. * both, return the num'th driver only if its address matches. Return
  575. * error code if not.
  576. */
  577. struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
  578. {
  579. struct mtd_info *ret = NULL, *other;
  580. int err = -ENODEV;
  581. mutex_lock(&mtd_table_mutex);
  582. if (num == -1) {
  583. mtd_for_each_device(other) {
  584. if (other == mtd) {
  585. ret = mtd;
  586. break;
  587. }
  588. }
  589. } else if (num >= 0) {
  590. ret = idr_find(&mtd_idr, num);
  591. if (mtd && mtd != ret)
  592. ret = NULL;
  593. }
  594. if (!ret) {
  595. ret = ERR_PTR(err);
  596. goto out;
  597. }
  598. err = __get_mtd_device(ret);
  599. if (err)
  600. ret = ERR_PTR(err);
  601. out:
  602. mutex_unlock(&mtd_table_mutex);
  603. return ret;
  604. }
  605. EXPORT_SYMBOL_GPL(get_mtd_device);
  606. int __get_mtd_device(struct mtd_info *mtd)
  607. {
  608. int err;
  609. if (!try_module_get(mtd->owner))
  610. return -ENODEV;
  611. if (mtd->_get_device) {
  612. err = mtd->_get_device(mtd);
  613. if (err) {
  614. module_put(mtd->owner);
  615. return err;
  616. }
  617. }
  618. mtd->usecount++;
  619. return 0;
  620. }
  621. EXPORT_SYMBOL_GPL(__get_mtd_device);
  622. /**
  623. * get_mtd_device_nm - obtain a validated handle for an MTD device by
  624. * device name
  625. * @name: MTD device name to open
  626. *
  627. * This function returns MTD device description structure in case of
  628. * success and an error code in case of failure.
  629. */
  630. struct mtd_info *get_mtd_device_nm(const char *name)
  631. {
  632. int err = -ENODEV;
  633. struct mtd_info *mtd = NULL, *other;
  634. mutex_lock(&mtd_table_mutex);
  635. mtd_for_each_device(other) {
  636. if (!strcmp(name, other->name)) {
  637. mtd = other;
  638. break;
  639. }
  640. }
  641. if (!mtd)
  642. goto out_unlock;
  643. err = __get_mtd_device(mtd);
  644. if (err)
  645. goto out_unlock;
  646. mutex_unlock(&mtd_table_mutex);
  647. return mtd;
  648. out_unlock:
  649. mutex_unlock(&mtd_table_mutex);
  650. return ERR_PTR(err);
  651. }
  652. EXPORT_SYMBOL_GPL(get_mtd_device_nm);
#if defined(CONFIG_CMD_MTDPARTS_SPREAD)
/**
 * mtd_get_len_incl_bad
 *
 * Check if length including bad blocks fits into device.
 *
 * @param mtd an MTD device
 * @param offset offset in flash
 * @param length image length
 * @return image length including bad blocks in *len_incl_bad and whether or not
 *         the length returned was truncated in *truncated
 */
void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset,
			  const uint64_t length, uint64_t *len_incl_bad,
			  int *truncated)
{
	*truncated = 0;
	*len_incl_bad = 0;

	/* Devices that cannot report bad blocks need no expansion. */
	if (!mtd->_block_isbad) {
		*len_incl_bad = length;
		return;
	}

	uint64_t len_excl_bad = 0;
	uint64_t block_len;

	/* Walk erase blocks until 'length' good bytes are covered. */
	while (len_excl_bad < length) {
		if (offset >= mtd->size) {
			/* Ran off the device: report the partial total. */
			*truncated = 1;
			return;
		}

		/* Bytes from 'offset' to the end of its erase block.
		 * NOTE(review): the masking assumes erasesize is a power
		 * of two -- confirm for devices using this path. */
		block_len = mtd->erasesize - (offset & (mtd->erasesize - 1));

		/* Good blocks count toward the requested length; bad
		 * blocks only inflate the returned total. */
		if (!mtd->_block_isbad(mtd, offset & ~(mtd->erasesize - 1)))
			len_excl_bad += block_len;

		*len_incl_bad += block_len;
		offset += block_len;
	}
}
#endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */
/* Locked wrapper around __put_mtd_device(): drop one reference to @mtd. */
void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);
/* Drop a reference taken by __get_mtd_device(). Callers hold
 * mtd_table_mutex (see put_mtd_device()). */
void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);	/* unbalanced put */

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
  706. /*
  707. * Erase is an asynchronous operation. Device drivers are supposed
  708. * to call instr->callback() whenever the operation completes, even
  709. * if it completes with a failure.
  710. * Callers are supposed to pass a callback function and wait for it
  711. * to be called before writing to the block.
  712. */
  713. int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
  714. {
  715. if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
  716. return -EINVAL;
  717. if (!(mtd->flags & MTD_WRITEABLE))
  718. return -EROFS;
  719. instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
  720. if (!instr->len) {
  721. instr->state = MTD_ERASE_DONE;
  722. mtd_erase_callback(instr);
  723. return 0;
  724. }
  725. return mtd->_erase(mtd, instr);
  726. }
  727. EXPORT_SYMBOL_GPL(mtd_erase);
#ifndef __UBOOT__
/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	/* Initialise the outputs so failure paths leave them sane. */
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	/* Mapping nothing always succeeds. */
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);
  748. /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
  749. int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
  750. {
  751. if (!mtd->_point)
  752. return -EOPNOTSUPP;
  753. if (from < 0 || from > mtd->size || len > mtd->size - from)
  754. return -EINVAL;
  755. if (!len)
  756. return 0;
  757. return mtd->_unpoint(mtd, from, len);
  758. }
  759. EXPORT_SYMBOL_GPL(mtd_unpoint);
  760. #endif
/*
 * Allow NOMMU mmap() to directly map the device, when the driver
 * provides a _get_unmapped_area handler.
 * - returns the address to which the offset maps on success
 * - returns a negative error code on refusal (note: this code returns
 *   -EOPNOTSUPP/-EINVAL, not the -ENOSYS an older comment claimed)
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	/* The requested window must lie entirely within the device. */
	if (offset > mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
  776. int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
  777. u_char *buf)
  778. {
  779. int ret_code;
  780. *retlen = 0;
  781. if (from < 0 || from > mtd->size || len > mtd->size - from)
  782. return -EINVAL;
  783. if (!len)
  784. return 0;
  785. /*
  786. * In the absence of an error, drivers return a non-negative integer
  787. * representing the maximum number of bitflips that were corrected on
  788. * any one ecc region (if applicable; zero otherwise).
  789. */
  790. ret_code = mtd->_read(mtd, from, len, retlen, buf);
  791. if (unlikely(ret_code < 0))
  792. return ret_code;
  793. if (mtd->ecc_strength == 0)
  794. return 0; /* device lacks ecc */
  795. return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
  796. }
  797. EXPORT_SYMBOL_GPL(mtd_read);
  798. int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
  799. const u_char *buf)
  800. {
  801. *retlen = 0;
  802. if (to < 0 || to > mtd->size || len > mtd->size - to)
  803. return -EINVAL;
  804. if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
  805. return -EROFS;
  806. if (!len)
  807. return 0;
  808. return mtd->_write(mtd, to, len, retlen, buf);
  809. }
  810. EXPORT_SYMBOL_GPL(mtd_write);
  811. /*
  812. * In blackbox flight recorder like scenarios we want to make successful writes
  813. * in interrupt context. panic_write() is only intended to be called when its
  814. * known the kernel is about to panic and we need the write to succeed. Since
  815. * the kernel is not going to be running for much longer, this function can
  816. * break locks and delay to ensure the write succeeds (but not sleep).
  817. */
  818. int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
  819. const u_char *buf)
  820. {
  821. *retlen = 0;
  822. if (!mtd->_panic_write)
  823. return -EOPNOTSUPP;
  824. if (to < 0 || to > mtd->size || len > mtd->size - to)
  825. return -EINVAL;
  826. if (!(mtd->flags & MTD_WRITEABLE))
  827. return -EROFS;
  828. if (!len)
  829. return 0;
  830. return mtd->_panic_write(mtd, to, len, retlen, buf);
  831. }
  832. EXPORT_SYMBOL_GPL(mtd_panic_write);
  833. int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
  834. {
  835. int ret_code;
  836. ops->retlen = ops->oobretlen = 0;
  837. if (!mtd->_read_oob)
  838. return -EOPNOTSUPP;
  839. /*
  840. * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
  841. * similar to mtd->_read(), returning a non-negative integer
  842. * representing max bitflips. In other cases, mtd->_read_oob() may
  843. * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
  844. */
  845. ret_code = mtd->_read_oob(mtd, from, ops);
  846. if (unlikely(ret_code < 0))
  847. return ret_code;
  848. if (mtd->ecc_strength == 0)
  849. return 0; /* device lacks ecc */
  850. return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
  851. }
  852. EXPORT_SYMBOL_GPL(mtd_read_oob);
  853. /**
  854. * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
  855. * @mtd: MTD device structure
  856. * @section: ECC section. Depending on the layout you may have all the ECC
  857. * bytes stored in a single contiguous section, or one section
  858. * per ECC chunk (and sometime several sections for a single ECC
  859. * ECC chunk)
  860. * @oobecc: OOB region struct filled with the appropriate ECC position
  861. * information
  862. *
  863. * This function returns ECC section information in the OOB area. If you want
  864. * to get all the ECC bytes information, then you should call
  865. * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
  866. *
  867. * Returns zero on success, a negative error code otherwise.
  868. */
  869. int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
  870. struct mtd_oob_region *oobecc)
  871. {
  872. memset(oobecc, 0, sizeof(*oobecc));
  873. if (!mtd || section < 0)
  874. return -EINVAL;
  875. if (!mtd->ooblayout || !mtd->ooblayout->ecc)
  876. return -ENOTSUPP;
  877. return mtd->ooblayout->ecc(mtd, section, oobecc);
  878. }
  879. EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
  880. /**
  881. * mtd_ooblayout_free - Get the OOB region definition of a specific free
  882. * section
  883. * @mtd: MTD device structure
  884. * @section: Free section you are interested in. Depending on the layout
  885. * you may have all the free bytes stored in a single contiguous
  886. * section, or one section per ECC chunk plus an extra section
  887. * for the remaining bytes (or other funky layout).
  888. * @oobfree: OOB region struct filled with the appropriate free position
  889. * information
  890. *
  891. * This function returns free bytes position in the OOB area. If you want
  892. * to get all the free bytes information, then you should call
  893. * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
  894. *
  895. * Returns zero on success, a negative error code otherwise.
  896. */
  897. int mtd_ooblayout_free(struct mtd_info *mtd, int section,
  898. struct mtd_oob_region *oobfree)
  899. {
  900. memset(oobfree, 0, sizeof(*oobfree));
  901. if (!mtd || section < 0)
  902. return -EINVAL;
  903. if (!mtd->ooblayout || !mtd->ooblayout->free)
  904. return -ENOTSUPP;
  905. return mtd->ooblayout->free(mtd, section, oobfree);
  906. }
  907. EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
  908. /**
  909. * mtd_ooblayout_find_region - Find the region attached to a specific byte
  910. * @mtd: mtd info structure
  911. * @byte: the byte we are searching for
  912. * @sectionp: pointer where the section id will be stored
  913. * @oobregion: used to retrieve the ECC position
  914. * @iter: iterator function. Should be either mtd_ooblayout_free or
  915. * mtd_ooblayout_ecc depending on the region type you're searching for
  916. *
  917. * This function returns the section id and oobregion information of a
  918. * specific byte. For example, say you want to know where the 4th ECC byte is
  919. * stored, you'll use:
  920. *
  921. * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
  922. *
  923. * Returns zero on success, a negative error code otherwise.
  924. */
  925. static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
  926. int *sectionp, struct mtd_oob_region *oobregion,
  927. int (*iter)(struct mtd_info *,
  928. int section,
  929. struct mtd_oob_region *oobregion))
  930. {
  931. int pos = 0, ret, section = 0;
  932. memset(oobregion, 0, sizeof(*oobregion));
  933. while (1) {
  934. ret = iter(mtd, section, oobregion);
  935. if (ret)
  936. return ret;
  937. if (pos + oobregion->length > byte)
  938. break;
  939. pos += oobregion->length;
  940. section++;
  941. }
  942. /*
  943. * Adjust region info to make it start at the beginning at the
  944. * 'start' ECC byte.
  945. */
  946. oobregion->offset += byte - pos;
  947. oobregion->length -= byte - pos;
  948. *sectionp = section;
  949. return 0;
  950. }
  951. /**
  952. * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
  953. * ECC byte
  954. * @mtd: mtd info structure
  955. * @eccbyte: the byte we are searching for
  956. * @sectionp: pointer where the section id will be stored
  957. * @oobregion: OOB region information
  958. *
  959. * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
  960. * byte.
  961. *
  962. * Returns zero on success, a negative error code otherwise.
  963. */
  964. int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
  965. int *section,
  966. struct mtd_oob_region *oobregion)
  967. {
  968. return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
  969. mtd_ooblayout_ecc);
  970. }
  971. EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
  972. /**
  973. * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
  974. * @mtd: mtd info structure
  975. * @buf: destination buffer to store OOB bytes
  976. * @oobbuf: OOB buffer
  977. * @start: first byte to retrieve
  978. * @nbytes: number of bytes to retrieve
  979. * @iter: section iterator
  980. *
  981. * Extract bytes attached to a specific category (ECC or free)
  982. * from the OOB buffer and copy them into buf.
  983. *
  984. * Returns zero on success, a negative error code otherwise.
  985. */
  986. static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
  987. const u8 *oobbuf, int start, int nbytes,
  988. int (*iter)(struct mtd_info *,
  989. int section,
  990. struct mtd_oob_region *oobregion))
  991. {
  992. struct mtd_oob_region oobregion;
  993. int section, ret;
  994. ret = mtd_ooblayout_find_region(mtd, start, &section,
  995. &oobregion, iter);
  996. while (!ret) {
  997. int cnt;
  998. cnt = min_t(int, nbytes, oobregion.length);
  999. memcpy(buf, oobbuf + oobregion.offset, cnt);
  1000. buf += cnt;
  1001. nbytes -= cnt;
  1002. if (!nbytes)
  1003. break;
  1004. ret = iter(mtd, ++section, &oobregion);
  1005. }
  1006. return ret;
  1007. }
  1008. /**
  1009. * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
  1010. * @mtd: mtd info structure
  1011. * @buf: source buffer to get OOB bytes from
  1012. * @oobbuf: OOB buffer
  1013. * @start: first OOB byte to set
  1014. * @nbytes: number of OOB bytes to set
  1015. * @iter: section iterator
  1016. *
  1017. * Fill the OOB buffer with data provided in buf. The category (ECC or free)
  1018. * is selected by passing the appropriate iterator.
  1019. *
  1020. * Returns zero on success, a negative error code otherwise.
  1021. */
  1022. static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
  1023. u8 *oobbuf, int start, int nbytes,
  1024. int (*iter)(struct mtd_info *,
  1025. int section,
  1026. struct mtd_oob_region *oobregion))
  1027. {
  1028. struct mtd_oob_region oobregion;
  1029. int section, ret;
  1030. ret = mtd_ooblayout_find_region(mtd, start, &section,
  1031. &oobregion, iter);
  1032. while (!ret) {
  1033. int cnt;
  1034. cnt = min_t(int, nbytes, oobregion.length);
  1035. memcpy(oobbuf + oobregion.offset, buf, cnt);
  1036. buf += cnt;
  1037. nbytes -= cnt;
  1038. if (!nbytes)
  1039. break;
  1040. ret = iter(mtd, ++section, &oobregion);
  1041. }
  1042. return ret;
  1043. }
  1044. /**
  1045. * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
  1046. * @mtd: mtd info structure
  1047. * @iter: category iterator
  1048. *
  1049. * Count the number of bytes in a given category.
  1050. *
  1051. * Returns a positive value on success, a negative error code otherwise.
  1052. */
  1053. static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
  1054. int (*iter)(struct mtd_info *,
  1055. int section,
  1056. struct mtd_oob_region *oobregion))
  1057. {
  1058. struct mtd_oob_region oobregion;
  1059. int section = 0, ret, nbytes = 0;
  1060. while (1) {
  1061. ret = iter(mtd, section++, &oobregion);
  1062. if (ret) {
  1063. if (ret == -ERANGE)
  1064. ret = nbytes;
  1065. break;
  1066. }
  1067. nbytes += oobregion.length;
  1068. }
  1069. return ret;
  1070. }
  1071. /**
  1072. * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
  1073. * @mtd: mtd info structure
  1074. * @eccbuf: destination buffer to store ECC bytes
  1075. * @oobbuf: OOB buffer
  1076. * @start: first ECC byte to retrieve
  1077. * @nbytes: number of ECC bytes to retrieve
  1078. *
  1079. * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
  1080. *
  1081. * Returns zero on success, a negative error code otherwise.
  1082. */
  1083. int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
  1084. const u8 *oobbuf, int start, int nbytes)
  1085. {
  1086. return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
  1087. mtd_ooblayout_ecc);
  1088. }
  1089. EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
  1090. /**
  1091. * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
  1092. * @mtd: mtd info structure
  1093. * @eccbuf: source buffer to get ECC bytes from
  1094. * @oobbuf: OOB buffer
  1095. * @start: first ECC byte to set
  1096. * @nbytes: number of ECC bytes to set
  1097. *
  1098. * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
  1099. *
  1100. * Returns zero on success, a negative error code otherwise.
  1101. */
  1102. int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
  1103. u8 *oobbuf, int start, int nbytes)
  1104. {
  1105. return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
  1106. mtd_ooblayout_ecc);
  1107. }
  1108. EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
  1109. /**
  1110. * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
  1111. * @mtd: mtd info structure
  1112. * @databuf: destination buffer to store ECC bytes
  1113. * @oobbuf: OOB buffer
  1114. * @start: first ECC byte to retrieve
  1115. * @nbytes: number of ECC bytes to retrieve
  1116. *
  1117. * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
  1118. *
  1119. * Returns zero on success, a negative error code otherwise.
  1120. */
  1121. int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
  1122. const u8 *oobbuf, int start, int nbytes)
  1123. {
  1124. return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
  1125. mtd_ooblayout_free);
  1126. }
  1127. EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
  1128. /**
  1129. * mtd_ooblayout_get_eccbytes - set data bytes into the oob buffer
  1130. * @mtd: mtd info structure
  1131. * @eccbuf: source buffer to get data bytes from
  1132. * @oobbuf: OOB buffer
  1133. * @start: first ECC byte to set
  1134. * @nbytes: number of ECC bytes to set
  1135. *
  1136. * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
  1137. *
  1138. * Returns zero on success, a negative error code otherwise.
  1139. */
  1140. int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
  1141. u8 *oobbuf, int start, int nbytes)
  1142. {
  1143. return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
  1144. mtd_ooblayout_free);
  1145. }
  1146. EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
  1147. /**
  1148. * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
  1149. * @mtd: mtd info structure
  1150. *
  1151. * Works like mtd_ooblayout_count_bytes(), except it count free bytes.
  1152. *
  1153. * Returns zero on success, a negative error code otherwise.
  1154. */
  1155. int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
  1156. {
  1157. return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
  1158. }
  1159. EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
  1160. /**
  1161. * mtd_ooblayout_count_freebytes - count the number of ECC bytes in OOB
  1162. * @mtd: mtd info structure
  1163. *
  1164. * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes.
  1165. *
  1166. * Returns zero on success, a negative error code otherwise.
  1167. */
  1168. int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
  1169. {
  1170. return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
  1171. }
  1172. EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
  1173. /*
  1174. * Method to access the protection register area, present in some flash
  1175. * devices. The user data is one time programmable but the factory data is read
  1176. * only.
  1177. */
  1178. int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
  1179. struct otp_info *buf)
  1180. {
  1181. if (!mtd->_get_fact_prot_info)
  1182. return -EOPNOTSUPP;
  1183. if (!len)
  1184. return 0;
  1185. return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
  1186. }
  1187. EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
  1188. int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
  1189. size_t *retlen, u_char *buf)
  1190. {
  1191. *retlen = 0;
  1192. if (!mtd->_read_fact_prot_reg)
  1193. return -EOPNOTSUPP;
  1194. if (!len)
  1195. return 0;
  1196. return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
  1197. }
  1198. EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
  1199. int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
  1200. struct otp_info *buf)
  1201. {
  1202. if (!mtd->_get_user_prot_info)
  1203. return -EOPNOTSUPP;
  1204. if (!len)
  1205. return 0;
  1206. return mtd->_get_user_prot_info(mtd, len, retlen, buf);
  1207. }
  1208. EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
  1209. int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
  1210. size_t *retlen, u_char *buf)
  1211. {
  1212. *retlen = 0;
  1213. if (!mtd->_read_user_prot_reg)
  1214. return -EOPNOTSUPP;
  1215. if (!len)
  1216. return 0;
  1217. return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
  1218. }
  1219. EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
  1220. int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
  1221. size_t *retlen, u_char *buf)
  1222. {
  1223. int ret;
  1224. *retlen = 0;
  1225. if (!mtd->_write_user_prot_reg)
  1226. return -EOPNOTSUPP;
  1227. if (!len)
  1228. return 0;
  1229. ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
  1230. if (ret)
  1231. return ret;
  1232. /*
  1233. * If no data could be written at all, we are out of memory and
  1234. * must return -ENOSPC.
  1235. */
  1236. return (*retlen) ? 0 : -ENOSPC;
  1237. }
  1238. EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
  1239. int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
  1240. {
  1241. if (!mtd->_lock_user_prot_reg)
  1242. return -EOPNOTSUPP;
  1243. if (!len)
  1244. return 0;
  1245. return mtd->_lock_user_prot_reg(mtd, from, len);
  1246. }
  1247. EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
  1248. /* Chip-supported device locking */
  1249. int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  1250. {
  1251. if (!mtd->_lock)
  1252. return -EOPNOTSUPP;
  1253. if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
  1254. return -EINVAL;
  1255. if (!len)
  1256. return 0;
  1257. return mtd->_lock(mtd, ofs, len);
  1258. }
  1259. EXPORT_SYMBOL_GPL(mtd_lock);
  1260. int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  1261. {
  1262. if (!mtd->_unlock)
  1263. return -EOPNOTSUPP;
  1264. if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
  1265. return -EINVAL;
  1266. if (!len)
  1267. return 0;
  1268. return mtd->_unlock(mtd, ofs, len);
  1269. }
  1270. EXPORT_SYMBOL_GPL(mtd_unlock);
  1271. int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  1272. {
  1273. if (!mtd->_is_locked)
  1274. return -EOPNOTSUPP;
  1275. if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
  1276. return -EINVAL;
  1277. if (!len)
  1278. return 0;
  1279. return mtd->_is_locked(mtd, ofs, len);
  1280. }
  1281. EXPORT_SYMBOL_GPL(mtd_is_locked);
  1282. int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
  1283. {
  1284. if (ofs < 0 || ofs > mtd->size)
  1285. return -EINVAL;
  1286. if (!mtd->_block_isreserved)
  1287. return 0;
  1288. return mtd->_block_isreserved(mtd, ofs);
  1289. }
  1290. EXPORT_SYMBOL_GPL(mtd_block_isreserved);
  1291. int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
  1292. {
  1293. if (ofs < 0 || ofs > mtd->size)
  1294. return -EINVAL;
  1295. if (!mtd->_block_isbad)
  1296. return 0;
  1297. return mtd->_block_isbad(mtd, ofs);
  1298. }
  1299. EXPORT_SYMBOL_GPL(mtd_block_isbad);
  1300. int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
  1301. {
  1302. if (!mtd->_block_markbad)
  1303. return -EOPNOTSUPP;
  1304. if (ofs < 0 || ofs > mtd->size)
  1305. return -EINVAL;
  1306. if (!(mtd->flags & MTD_WRITEABLE))
  1307. return -EROFS;
  1308. return mtd->_block_markbad(mtd, ofs);
  1309. }
  1310. EXPORT_SYMBOL_GPL(mtd_block_markbad);
  1311. #ifndef __UBOOT__
  1312. /*
  1313. * default_mtd_writev - the default writev method
  1314. * @mtd: mtd device description object pointer
  1315. * @vecs: the vectors to write
  1316. * @count: count of vectors in @vecs
  1317. * @to: the MTD device offset to write to
  1318. * @retlen: on exit contains the count of bytes written to the MTD device.
  1319. *
  1320. * This function returns zero in case of success and a negative error code in
  1321. * case of failure.
  1322. */
  1323. static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
  1324. unsigned long count, loff_t to, size_t *retlen)
  1325. {
  1326. unsigned long i;
  1327. size_t totlen = 0, thislen;
  1328. int ret = 0;
  1329. for (i = 0; i < count; i++) {
  1330. if (!vecs[i].iov_len)
  1331. continue;
  1332. ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
  1333. vecs[i].iov_base);
  1334. totlen += thislen;
  1335. if (ret || thislen != vecs[i].iov_len)
  1336. break;
  1337. to += vecs[i].iov_len;
  1338. }
  1339. *retlen = totlen;
  1340. return ret;
  1341. }
  1342. /*
  1343. * mtd_writev - the vector-based MTD write method
  1344. * @mtd: mtd device description object pointer
  1345. * @vecs: the vectors to write
  1346. * @count: count of vectors in @vecs
  1347. * @to: the MTD device offset to write to
  1348. * @retlen: on exit contains the count of bytes written to the MTD device.
  1349. *
  1350. * This function returns zero in case of success and a negative error code in
  1351. * case of failure.
  1352. */
  1353. int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
  1354. unsigned long count, loff_t to, size_t *retlen)
  1355. {
  1356. *retlen = 0;
  1357. if (!(mtd->flags & MTD_WRITEABLE))
  1358. return -EROFS;
  1359. if (!mtd->_writev)
  1360. return default_mtd_writev(mtd, vecs, count, to, retlen);
  1361. return mtd->_writev(mtd, vecs, count, to, retlen);
  1362. }
  1363. EXPORT_SYMBOL_GPL(mtd_writev);
  1364. /**
  1365. * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
  1366. * @mtd: mtd device description object pointer
  1367. * @size: a pointer to the ideal or maximum size of the allocation, points
  1368. * to the actual allocation size on success.
  1369. *
  1370. * This routine attempts to allocate a contiguous kernel buffer up to
  1371. * the specified size, backing off the size of the request exponentially
  1372. * until the request succeeds or until the allocation size falls below
  1373. * the system page size. This attempts to make sure it does not adversely
  1374. * impact system performance, so when allocating more than one page, we
  1375. * ask the memory allocator to avoid re-trying, swapping, writing back
  1376. * or performing I/O.
  1377. *
  1378. * Note, this function also makes sure that the allocated buffer is aligned to
  1379. * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
  1380. *
  1381. * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
  1382. * to handle smaller (i.e. degraded) buffer allocations under low- or
  1383. * fragmented-memory situations where such reduced allocations, from a
  1384. * requested ideal, are allowed.
  1385. *
  1386. * Returns a pointer to the allocated buffer on success; otherwise, NULL.
  1387. */
  1388. void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
  1389. {
  1390. gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
  1391. __GFP_NORETRY | __GFP_NO_KSWAPD;
  1392. size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
  1393. void *kbuf;
  1394. *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
  1395. while (*size > min_alloc) {
  1396. kbuf = kmalloc(*size, flags);
  1397. if (kbuf)
  1398. return kbuf;
  1399. *size >>= 1;
  1400. *size = ALIGN(*size, mtd->writesize);
  1401. }
  1402. /*
  1403. * For the last resort allocation allow 'kmalloc()' to do all sorts of
  1404. * things (write-back, dropping caches, etc) by using GFP_KERNEL.
  1405. */
  1406. return kmalloc(*size, GFP_KERNEL);
  1407. }
  1408. EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
  1409. #endif
  1410. #ifdef CONFIG_PROC_FS
  1411. /*====================================================================*/
  1412. /* Support for /proc/mtd */
  1413. static int mtd_proc_show(struct seq_file *m, void *v)
  1414. {
  1415. struct mtd_info *mtd;
  1416. seq_puts(m, "dev: size erasesize name\n");
  1417. mutex_lock(&mtd_table_mutex);
  1418. mtd_for_each_device(mtd) {
  1419. seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
  1420. mtd->index, (unsigned long long)mtd->size,
  1421. mtd->erasesize, mtd->name);
  1422. }
  1423. mutex_unlock(&mtd_table_mutex);
  1424. return 0;
  1425. }
  1426. static int mtd_proc_open(struct inode *inode, struct file *file)
  1427. {
  1428. return single_open(file, mtd_proc_show, NULL);
  1429. }
  1430. static const struct file_operations mtd_proc_ops = {
  1431. .open = mtd_proc_open,
  1432. .read = seq_read,
  1433. .llseek = seq_lseek,
  1434. .release = single_release,
  1435. };
  1436. #endif /* CONFIG_PROC_FS */
  1437. /*====================================================================*/
  1438. /* Init code */
  1439. #ifndef __UBOOT__
  1440. static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
  1441. {
  1442. int ret;
  1443. ret = bdi_init(bdi);
  1444. if (!ret)
  1445. ret = bdi_register(bdi, NULL, "%s", name);
  1446. if (ret)
  1447. bdi_destroy(bdi);
  1448. return ret;
  1449. }
  1450. static struct proc_dir_entry *proc_mtd;
  1451. static int __init init_mtd(void)
  1452. {
  1453. int ret;
  1454. ret = class_register(&mtd_class);
  1455. if (ret)
  1456. goto err_reg;
  1457. ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
  1458. if (ret)
  1459. goto err_bdi1;
  1460. ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
  1461. if (ret)
  1462. goto err_bdi2;
  1463. ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
  1464. if (ret)
  1465. goto err_bdi3;
  1466. proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);
  1467. ret = init_mtdchar();
  1468. if (ret)
  1469. goto out_procfs;
  1470. return 0;
  1471. out_procfs:
  1472. if (proc_mtd)
  1473. remove_proc_entry("mtd", NULL);
  1474. err_bdi3:
  1475. bdi_destroy(&mtd_bdi_ro_mappable);
  1476. err_bdi2:
  1477. bdi_destroy(&mtd_bdi_unmappable);
  1478. err_bdi1:
  1479. class_unregister(&mtd_class);
  1480. err_reg:
  1481. pr_err("Error registering mtd class or bdi: %d\n", ret);
  1482. return ret;
  1483. }
  1484. static void __exit cleanup_mtd(void)
  1485. {
  1486. cleanup_mtdchar();
  1487. if (proc_mtd)
  1488. remove_proc_entry("mtd", NULL);
  1489. class_unregister(&mtd_class);
  1490. bdi_destroy(&mtd_bdi_unmappable);
  1491. bdi_destroy(&mtd_bdi_ro_mappable);
  1492. bdi_destroy(&mtd_bdi_rw_mappable);
  1493. }
  1494. module_init(init_mtd);
  1495. module_exit(cleanup_mtd);
  1496. #endif
  1497. MODULE_LICENSE("GPL");
  1498. MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
  1499. MODULE_DESCRIPTION("Core MTD registration and access routines");