build.c

  1. /*
  2. * Copyright (c) International Business Machines Corp., 2006
  3. * Copyright (c) Nokia Corporation, 2007
  4. *
  5. * SPDX-License-Identifier: GPL-2.0+
  6. *
  7. * Author: Artem Bityutskiy (Битюцкий Артём),
  8. * Frank Haverkamp
  9. */
  10. /*
  11. * This file includes UBI initialization and building of UBI devices.
  12. *
  13. * When UBI is initialized, it attaches all the MTD devices specified as the
  14. * module load parameters or the kernel boot parameters. If MTD devices were
  15. * not specified, UBI does not attach any MTD device, but it is possible to do
  16. * later using the "UBI control device".
  17. */
  18. #ifndef __UBOOT__
  19. #include <linux/module.h>
  20. #include <linux/moduleparam.h>
  21. #include <linux/stringify.h>
  22. #include <linux/namei.h>
  23. #include <linux/stat.h>
  24. #include <linux/miscdevice.h>
  25. #include <linux/log2.h>
  26. #include <linux/kthread.h>
  27. #include <linux/kernel.h>
  28. #include <linux/slab.h>
  29. #include <linux/major.h>
  30. #else
  31. #include <linux/compat.h>
  32. #endif
  33. #include <linux/err.h>
  34. #include <ubi_uboot.h>
  35. #include <linux/mtd/partitions.h>
  36. #include "ubi.h"
  37. /* Maximum length of the 'mtd=' parameter */
  38. #define MTD_PARAM_LEN_MAX 64
  39. /* Maximum number of comma-separated items in the 'mtd=' parameter */
  40. #define MTD_PARAM_MAX_COUNT 4
  41. /* Maximum value for the number of bad PEBs per 1024 PEBs */
  42. #define MAX_MTD_UBI_BEB_LIMIT 768
  43. #ifdef CONFIG_MTD_UBI_MODULE
  44. #define ubi_is_module() 1
  45. #else
  46. #define ubi_is_module() 0
  47. #endif
  48. #if (CONFIG_SYS_MALLOC_LEN < (512 << 10))
  49. #error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k
  50. #endif
  51. /**
  52. * struct mtd_dev_param - MTD device parameter description data structure.
  53. * @name: MTD character device node path, MTD device name, or MTD device number
  54. * string
  55. * @vid_hdr_offs: VID header offset
  56. * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
  57. */
  58. struct mtd_dev_param {
  59. char name[MTD_PARAM_LEN_MAX];
  60. int ubi_num;
  61. int vid_hdr_offs;
  62. int max_beb_per1024;
  63. };
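/*
 * Illustrative example (hypothetical values): an "mtd=" parameter such as
 * "mtd=nand0,2048,20,1" is parsed by ubi_mtd_param_parse() below into
 * name = "nand0", vid_hdr_offs = 2048, max_beb_per1024 = 20 and ubi_num = 1.
 */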
  64. /* Numbers of elements set in the @mtd_dev_param array */
  65. static int __initdata mtd_devs;
  66. /* MTD devices specification parameters */
  67. static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
  68. #ifndef __UBOOT__
  69. #ifdef CONFIG_MTD_UBI_FASTMAP
  70. /* UBI module parameter to enable fastmap automatically on non-fastmap images */
  71. static bool fm_autoconvert;
  72. #endif
  73. #else
  74. #ifdef CONFIG_MTD_UBI_FASTMAP
  75. #if !defined(CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT)
  76. #define CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT 0
  77. #endif
  78. static bool fm_autoconvert = CONFIG_MTD_UBI_FASTMAP_AUTOCONVERT;
  79. #endif
  80. #endif
  81. /* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
  82. struct class *ubi_class;
  83. /* Slab cache for wear-leveling entries */
  84. struct kmem_cache *ubi_wl_entry_slab;
  85. #ifndef __UBOOT__
  86. /* UBI control character device */
  87. static struct miscdevice ubi_ctrl_cdev = {
  88. .minor = MISC_DYNAMIC_MINOR,
  89. .name = "ubi_ctrl",
  90. .fops = &ubi_ctrl_cdev_operations,
  91. };
  92. #endif
  93. /* All UBI devices in system */
  94. #ifndef __UBOOT__
  95. static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
  96. #else
  97. struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
  98. #endif
  99. #ifndef __UBOOT__
  100. /* Serializes UBI devices creations and removals */
  101. DEFINE_MUTEX(ubi_devices_mutex);
  102. /* Protects @ubi_devices and @ubi->ref_count */
  103. static DEFINE_SPINLOCK(ubi_devices_lock);
  104. /* "Show" method for files in '/<sysfs>/class/ubi/' */
  105. static ssize_t ubi_version_show(struct class *class,
  106. struct class_attribute *attr, char *buf)
  107. {
  108. return sprintf(buf, "%d\n", UBI_VERSION);
  109. }
  110. /* UBI version attribute ('/<sysfs>/class/ubi/version') */
  111. static struct class_attribute ubi_version =
  112. __ATTR(version, S_IRUGO, ubi_version_show, NULL);
  113. static ssize_t dev_attribute_show(struct device *dev,
  114. struct device_attribute *attr, char *buf);
  115. /* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
  116. static struct device_attribute dev_eraseblock_size =
  117. __ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
  118. static struct device_attribute dev_avail_eraseblocks =
  119. __ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
  120. static struct device_attribute dev_total_eraseblocks =
  121. __ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
  122. static struct device_attribute dev_volumes_count =
  123. __ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
  124. static struct device_attribute dev_max_ec =
  125. __ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
  126. static struct device_attribute dev_reserved_for_bad =
  127. __ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
  128. static struct device_attribute dev_bad_peb_count =
  129. __ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
  130. static struct device_attribute dev_max_vol_count =
  131. __ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
  132. static struct device_attribute dev_min_io_size =
  133. __ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
  134. static struct device_attribute dev_bgt_enabled =
  135. __ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
  136. static struct device_attribute dev_mtd_num =
  137. __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
  138. #endif
  139. /**
  140. * ubi_volume_notify - send a volume change notification.
  141. * @ubi: UBI device description object
  142. * @vol: volume description object of the changed volume
  143. * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
  144. *
  145. * This is a helper function which notifies all subscribers about a volume
  146. * change event (creation, removal, re-sizing, re-naming, updating). Returns
  147. * zero in case of success and a negative error code in case of failure.
  148. */
  149. int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
  150. {
  151. struct ubi_notification nt;
  152. ubi_do_get_device_info(ubi, &nt.di);
  153. ubi_do_get_volume_info(ubi, vol, &nt.vi);
  154. #ifdef CONFIG_MTD_UBI_FASTMAP
  155. switch (ntype) {
  156. case UBI_VOLUME_ADDED:
  157. case UBI_VOLUME_REMOVED:
  158. case UBI_VOLUME_RESIZED:
  159. case UBI_VOLUME_RENAMED:
  160. if (ubi_update_fastmap(ubi)) {
  161. ubi_err("Unable to update fastmap!");
  162. ubi_ro_mode(ubi);
  163. }
  164. }
  165. #endif
  166. return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
  167. }
  168. /**
  169. * ubi_notify_all - send a notification to all volumes.
  170. * @ubi: UBI device description object
  171. * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
  172. * @nb: the notifier to call
  173. *
  174. * This function walks all volumes of UBI device @ubi and sends the @ntype
  175. * notification for each volume. If @nb is %NULL, then all registered notifiers
  176. * are called, otherwise only the @nb notifier is called. Returns the number of
  177. * sent notifications.
  178. */
  179. int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
  180. {
  181. struct ubi_notification nt;
  182. int i, count = 0;
  183. #ifndef __UBOOT__
  184. int ret;
  185. #endif
  186. ubi_do_get_device_info(ubi, &nt.di);
  187. mutex_lock(&ubi->device_mutex);
  188. for (i = 0; i < ubi->vtbl_slots; i++) {
  189. /*
  190. * Since @ubi->device_mutex is locked, and we are not going to
  191. * change @ubi->volumes, we do not have to lock
  192. * @ubi->volumes_lock.
  193. */
  194. if (!ubi->volumes[i])
  195. continue;
  196. ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
  197. #ifndef __UBOOT__
  198. if (nb)
  199. nb->notifier_call(nb, ntype, &nt);
  200. else
  201. ret = blocking_notifier_call_chain(&ubi_notifiers, ntype,
  202. &nt);
  203. #endif
  204. count += 1;
  205. }
  206. mutex_unlock(&ubi->device_mutex);
  207. return count;
  208. }
  209. /**
  210. * ubi_enumerate_volumes - send "add" notification for all existing volumes.
  211. * @nb: the notifier to call
  212. *
  213. * This function walks all UBI devices and volumes and sends the
  214. * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
  215. * registered notifiers are called, otherwise only the @nb notifier is called.
  216. * Returns the number of sent notifications.
  217. */
  218. int ubi_enumerate_volumes(struct notifier_block *nb)
  219. {
  220. int i, count = 0;
  221. /*
  222. * Since the @ubi_devices_mutex is locked, and we are not going to
  223. * change @ubi_devices, we do not have to lock @ubi_devices_lock.
  224. */
  225. for (i = 0; i < UBI_MAX_DEVICES; i++) {
  226. struct ubi_device *ubi = ubi_devices[i];
  227. if (!ubi)
  228. continue;
  229. count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
  230. }
  231. return count;
  232. }
  233. /**
  234. * ubi_get_device - get UBI device.
  235. * @ubi_num: UBI device number
  236. *
  237. * This function returns UBI device description object for UBI device number
  238. * @ubi_num, or %NULL if the device does not exist. This function increases the
  239. * device reference count to prevent removal of the device. In other words, the
  240. * device cannot be removed if its reference count is not zero.
  241. */
  242. struct ubi_device *ubi_get_device(int ubi_num)
  243. {
  244. struct ubi_device *ubi;
  245. spin_lock(&ubi_devices_lock);
  246. ubi = ubi_devices[ubi_num];
  247. if (ubi) {
  248. ubi_assert(ubi->ref_count >= 0);
  249. ubi->ref_count += 1;
  250. get_device(&ubi->dev);
  251. }
  252. spin_unlock(&ubi_devices_lock);
  253. return ubi;
  254. }
  255. /**
  256. * ubi_put_device - drop an UBI device reference.
  257. * @ubi: UBI device description object
  258. */
  259. void ubi_put_device(struct ubi_device *ubi)
  260. {
  261. spin_lock(&ubi_devices_lock);
  262. ubi->ref_count -= 1;
  263. put_device(&ubi->dev);
  264. spin_unlock(&ubi_devices_lock);
  265. }
  266. /**
  267. * ubi_get_by_major - get UBI device by character device major number.
  268. * @major: major number
  269. *
  270. * This function is similar to 'ubi_get_device()', but it searches the device
  271. * by its major number.
  272. */
  273. struct ubi_device *ubi_get_by_major(int major)
  274. {
  275. int i;
  276. struct ubi_device *ubi;
  277. spin_lock(&ubi_devices_lock);
  278. for (i = 0; i < UBI_MAX_DEVICES; i++) {
  279. ubi = ubi_devices[i];
  280. if (ubi && MAJOR(ubi->cdev.dev) == major) {
  281. ubi_assert(ubi->ref_count >= 0);
  282. ubi->ref_count += 1;
  283. get_device(&ubi->dev);
  284. spin_unlock(&ubi_devices_lock);
  285. return ubi;
  286. }
  287. }
  288. spin_unlock(&ubi_devices_lock);
  289. return NULL;
  290. }
  291. /**
  292. * ubi_major2num - get UBI device number by character device major number.
  293. * @major: major number
  294. *
  295. * This function searches UBI device number object by its major number. If UBI
  296. * device was not found, this function returns -ENODEV, otherwise the UBI device
  297. * number is returned.
  298. */
  299. int ubi_major2num(int major)
  300. {
  301. int i, ubi_num = -ENODEV;
  302. spin_lock(&ubi_devices_lock);
  303. for (i = 0; i < UBI_MAX_DEVICES; i++) {
  304. struct ubi_device *ubi = ubi_devices[i];
  305. if (ubi && MAJOR(ubi->cdev.dev) == major) {
  306. ubi_num = ubi->ubi_num;
  307. break;
  308. }
  309. }
  310. spin_unlock(&ubi_devices_lock);
  311. return ubi_num;
  312. }
  313. #ifndef __UBOOT__
  314. /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
  315. static ssize_t dev_attribute_show(struct device *dev,
  316. struct device_attribute *attr, char *buf)
  317. {
  318. ssize_t ret;
  319. struct ubi_device *ubi;
  320. /*
  321. * The below code looks weird, but it actually makes sense. We get the
  322. * UBI device reference from the contained 'struct ubi_device'. But it
  323. * is unclear if the device was removed or not yet. Indeed, if the
  324. * device was removed before we increased its reference count,
  325. * 'ubi_get_device()' will return -ENODEV and we fail.
  326. *
  327. * Remember, 'struct ubi_device' is freed in the release function, so
  328. * we still can use 'ubi->ubi_num'.
  329. */
  330. ubi = container_of(dev, struct ubi_device, dev);
  331. ubi = ubi_get_device(ubi->ubi_num);
  332. if (!ubi)
  333. return -ENODEV;
  334. if (attr == &dev_eraseblock_size)
  335. ret = sprintf(buf, "%d\n", ubi->leb_size);
  336. else if (attr == &dev_avail_eraseblocks)
  337. ret = sprintf(buf, "%d\n", ubi->avail_pebs);
  338. else if (attr == &dev_total_eraseblocks)
  339. ret = sprintf(buf, "%d\n", ubi->good_peb_count);
  340. else if (attr == &dev_volumes_count)
  341. ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
  342. else if (attr == &dev_max_ec)
  343. ret = sprintf(buf, "%d\n", ubi->max_ec);
  344. else if (attr == &dev_reserved_for_bad)
  345. ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
  346. else if (attr == &dev_bad_peb_count)
  347. ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
  348. else if (attr == &dev_max_vol_count)
  349. ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
  350. else if (attr == &dev_min_io_size)
  351. ret = sprintf(buf, "%d\n", ubi->min_io_size);
  352. else if (attr == &dev_bgt_enabled)
  353. ret = sprintf(buf, "%d\n", ubi->thread_enabled);
  354. else if (attr == &dev_mtd_num)
  355. ret = sprintf(buf, "%d\n", ubi->mtd->index);
  356. else
  357. ret = -EINVAL;
  358. ubi_put_device(ubi);
  359. return ret;
  360. }
  361. static void dev_release(struct device *dev)
  362. {
  363. struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);
  364. kfree(ubi);
  365. }
  366. /**
  367. * ubi_sysfs_init - initialize sysfs for an UBI device.
  368. * @ubi: UBI device description object
  369. * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
  370. * taken
  371. *
  372. * This function returns zero in case of success and a negative error code in
  373. * case of failure.
  374. */
  375. static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
  376. {
  377. int err;
  378. ubi->dev.release = dev_release;
  379. ubi->dev.devt = ubi->cdev.dev;
  380. ubi->dev.class = ubi_class;
  381. dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
  382. err = device_register(&ubi->dev);
  383. if (err)
  384. return err;
  385. *ref = 1;
  386. err = device_create_file(&ubi->dev, &dev_eraseblock_size);
  387. if (err)
  388. return err;
  389. err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
  390. if (err)
  391. return err;
  392. err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
  393. if (err)
  394. return err;
  395. err = device_create_file(&ubi->dev, &dev_volumes_count);
  396. if (err)
  397. return err;
  398. err = device_create_file(&ubi->dev, &dev_max_ec);
  399. if (err)
  400. return err;
  401. err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
  402. if (err)
  403. return err;
  404. err = device_create_file(&ubi->dev, &dev_bad_peb_count);
  405. if (err)
  406. return err;
  407. err = device_create_file(&ubi->dev, &dev_max_vol_count);
  408. if (err)
  409. return err;
  410. err = device_create_file(&ubi->dev, &dev_min_io_size);
  411. if (err)
  412. return err;
  413. err = device_create_file(&ubi->dev, &dev_bgt_enabled);
  414. if (err)
  415. return err;
  416. err = device_create_file(&ubi->dev, &dev_mtd_num);
  417. return err;
  418. }
  419. /**
  420. * ubi_sysfs_close - close sysfs for an UBI device.
  421. * @ubi: UBI device description object
  422. */
  423. static void ubi_sysfs_close(struct ubi_device *ubi)
  424. {
  425. device_remove_file(&ubi->dev, &dev_mtd_num);
  426. device_remove_file(&ubi->dev, &dev_bgt_enabled);
  427. device_remove_file(&ubi->dev, &dev_min_io_size);
  428. device_remove_file(&ubi->dev, &dev_max_vol_count);
  429. device_remove_file(&ubi->dev, &dev_bad_peb_count);
  430. device_remove_file(&ubi->dev, &dev_reserved_for_bad);
  431. device_remove_file(&ubi->dev, &dev_max_ec);
  432. device_remove_file(&ubi->dev, &dev_volumes_count);
  433. device_remove_file(&ubi->dev, &dev_total_eraseblocks);
  434. device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
  435. device_remove_file(&ubi->dev, &dev_eraseblock_size);
  436. device_unregister(&ubi->dev);
  437. }
  438. #endif
  439. /**
  440. * kill_volumes - destroy all user volumes.
  441. * @ubi: UBI device description object
  442. */
  443. static void kill_volumes(struct ubi_device *ubi)
  444. {
  445. int i;
  446. for (i = 0; i < ubi->vtbl_slots; i++)
  447. if (ubi->volumes[i])
  448. ubi_free_volume(ubi, ubi->volumes[i]);
  449. }
  450. /**
  451. * uif_init - initialize user interfaces for an UBI device.
  452. * @ubi: UBI device description object
  453. * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
  454. * taken, otherwise set to %0
  455. *
  456. * This function initializes various user interfaces for an UBI device. If the
  457. * initialization fails at an early stage, this function frees all the
  458. * resources it allocated, returns an error, and @ref is set to %0. However,
  459. * if the initialization fails after the UBI device was registered in the
  460. * driver core subsystem, this function takes a reference to @ubi->dev, because
  461. * otherwise the release function ('dev_release()') would free the whole @ubi
  462. * object. The @ref argument is set to %1 in this case. The caller has to put
  463. * this reference.
  464. *
  465. * This function returns zero in case of success and a negative error code in
  466. * case of failure.
  467. */
  468. static int uif_init(struct ubi_device *ubi, int *ref)
  469. {
  470. int i, err;
  471. #ifndef __UBOOT__
  472. dev_t dev;
  473. #endif
  474. *ref = 0;
  475. sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
  476. /*
  477. * Major numbers for the UBI character devices are allocated
  478. * dynamically. Major numbers of volume character devices are
  479. * equivalent to ones of the corresponding UBI character device. Minor
  480. * numbers of UBI character devices are 0, while minor numbers of
  481. * volume character devices start from 1. Thus, we allocate one major
  482. * number and ubi->vtbl_slots + 1 minor numbers.
  483. */
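/*
 * Illustrative example: with vtbl_slots = 128, the one dynamically allocated
 * major serves the ubiX device node (minor 0) and up to 128 volume nodes
 * ubiX_0 .. ubiX_127 (minors 1..128), i.e. 129 minors in total.
 */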
  484. err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
  485. if (err) {
  486. ubi_err("cannot register UBI character devices");
  487. return err;
  488. }
  489. ubi_assert(MINOR(dev) == 0);
  490. cdev_init(&ubi->cdev, &ubi_cdev_operations);
  491. dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
  492. ubi->cdev.owner = THIS_MODULE;
  493. err = cdev_add(&ubi->cdev, dev, 1);
  494. if (err) {
  495. ubi_err("cannot add character device");
  496. goto out_unreg;
  497. }
  498. err = ubi_sysfs_init(ubi, ref);
  499. if (err)
  500. goto out_sysfs;
  501. for (i = 0; i < ubi->vtbl_slots; i++)
  502. if (ubi->volumes[i]) {
  503. err = ubi_add_volume(ubi, ubi->volumes[i]);
  504. if (err) {
  505. ubi_err("cannot add volume %d", i);
  506. goto out_volumes;
  507. }
  508. }
  509. return 0;
  510. out_volumes:
  511. kill_volumes(ubi);
  512. out_sysfs:
  513. if (*ref)
  514. get_device(&ubi->dev);
  515. ubi_sysfs_close(ubi);
  516. cdev_del(&ubi->cdev);
  517. out_unreg:
  518. unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
  519. ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
  520. return err;
  521. }
  522. /**
  523. * uif_close - close user interfaces for an UBI device.
  524. * @ubi: UBI device description object
  525. *
  526. * Note, since this function un-registers UBI volume device objects (@vol->dev),
  527. * the memory allocated for the volumes is freed as well (in the release
  528. * function).
  529. */
  530. static void uif_close(struct ubi_device *ubi)
  531. {
  532. kill_volumes(ubi);
  533. ubi_sysfs_close(ubi);
  534. cdev_del(&ubi->cdev);
  535. unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
  536. }
  537. /**
  538. * ubi_free_internal_volumes - free internal volumes.
  539. * @ubi: UBI device description object
  540. */
  541. void ubi_free_internal_volumes(struct ubi_device *ubi)
  542. {
  543. int i;
  544. for (i = ubi->vtbl_slots;
  545. i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
  546. kfree(ubi->volumes[i]->eba_tbl);
  547. kfree(ubi->volumes[i]);
  548. }
  549. }
  550. static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
  551. {
  552. int limit, device_pebs;
  553. uint64_t device_size;
  554. if (!max_beb_per1024)
  555. return 0;
  556. /*
  557. * Here we are using size of the entire flash chip and
  558. * not just the MTD partition size because the maximum
  559. * number of bad eraseblocks is a percentage of the
  560. * whole device and bad eraseblocks are not fairly
  561. * distributed over the flash chip. So the worst case
  562. * is that all the bad eraseblocks of the chip are in
  563. * the MTD partition we are attaching (ubi->mtd).
  564. */
  565. device_size = mtd_get_device_size(ubi->mtd);
  566. device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
  567. limit = mult_frac(device_pebs, max_beb_per1024, 1024);
  568. /* Round it up */
  569. if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
  570. limit += 1;
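/*
 * Worked example (illustrative numbers): a 4096-PEB chip attached with
 * max_beb_per1024 = 20 yields limit = 4096 * 20 / 1024 = 80 PEBs; when the
 * division is not exact, the rounding above adds one more PEB.
 */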
  571. return limit;
  572. }
  573. /**
  574. * io_init - initialize I/O sub-system for a given UBI device.
  575. * @ubi: UBI device description object
  576. * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  577. *
  578. * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
  579. * assumed:
  580. * o EC header is always at offset zero - this cannot be changed;
  581. * o VID header starts just after the EC header at the closest address
  582. * aligned to @io->hdrs_min_io_size;
  583. * o data starts just after the VID header at the closest address aligned to
  584. * @io->min_io_size
  585. *
  586. * This function returns zero in case of success and a negative error code in
  587. * case of failure.
  588. */
  589. static int io_init(struct ubi_device *ubi, int max_beb_per1024)
  590. {
  591. dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
  592. dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
  593. if (ubi->mtd->numeraseregions != 0) {
  594. /*
  595. * Some flashes have several erase regions. Different regions
  596. * may have different eraseblock size and other
  597. * characteristics. It looks like mostly multi-region flashes
  598. * have one "main" region and one or more small regions to
  599. * store boot loader code or boot parameters or whatever. I
  600. * guess we should just pick the largest region. But this is
  601. * not implemented.
  602. */
  603. ubi_err("multiple regions, not implemented");
  604. return -EINVAL;
  605. }
  606. if (ubi->vid_hdr_offset < 0)
  607. return -EINVAL;
  608. /*
  609. * Note, in this implementation we support MTD devices with 0x7FFFFFFF
  610. * physical eraseblocks maximum.
  611. */
  612. ubi->peb_size = ubi->mtd->erasesize;
  613. ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
  614. ubi->flash_size = ubi->mtd->size;
  615. if (mtd_can_have_bb(ubi->mtd)) {
  616. ubi->bad_allowed = 1;
  617. ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
  618. }
  619. if (ubi->mtd->type == MTD_NORFLASH) {
  620. ubi_assert(ubi->mtd->writesize == 1);
  621. ubi->nor_flash = 1;
  622. }
  623. ubi->min_io_size = ubi->mtd->writesize;
  624. ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
  625. /*
  626. * Make sure minimal I/O unit is power of 2. Note, there is no
  627. * fundamental reason for this assumption. It is just an optimization
  628. * which allows us to avoid costly division operations.
  629. */
  630. if (!is_power_of_2(ubi->min_io_size)) {
  631. ubi_err("min. I/O unit (%d) is not power of 2",
  632. ubi->min_io_size);
  633. return -EINVAL;
  634. }
  635. ubi_assert(ubi->hdrs_min_io_size > 0);
  636. ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
  637. ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);
  638. ubi->max_write_size = ubi->mtd->writebufsize;
  639. /*
  640. * The maximum write size has to be greater than or equal to the min. I/O
  641. * size, and it has to be a multiple of the min. I/O size.
  642. */
  643. if (ubi->max_write_size < ubi->min_io_size ||
  644. ubi->max_write_size % ubi->min_io_size ||
  645. !is_power_of_2(ubi->max_write_size)) {
  646. ubi_err("bad write buffer size %d for %d min. I/O unit",
  647. ubi->max_write_size, ubi->min_io_size);
  648. return -EINVAL;
  649. }
  650. /* Calculate default aligned sizes of EC and VID headers */
  651. ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
  652. ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
  653. dbg_gen("min_io_size %d", ubi->min_io_size);
  654. dbg_gen("max_write_size %d", ubi->max_write_size);
  655. dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
  656. dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
  657. dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
  658. if (ubi->vid_hdr_offset == 0)
  659. /* Default offset */
  660. ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
  661. ubi->ec_hdr_alsize;
  662. else {
  663. ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
  664. ~(ubi->hdrs_min_io_size - 1);
  665. ubi->vid_hdr_shift = ubi->vid_hdr_offset -
  666. ubi->vid_hdr_aloffset;
  667. }
  668. /* Similar for the data offset */
  669. ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
  670. ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
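/*
 * Worked example (illustrative): on NAND with min_io_size = 2048 and
 * hdrs_min_io_size = 512, the default vid_hdr_offset is ec_hdr_alsize = 512,
 * so vid_hdr_aloffset = 512, vid_hdr_shift = 0 and
 * leb_start = ALIGN(512 + 64, 2048) = 2048.
 */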
  671. dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
  672. dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
  673. dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
  674. dbg_gen("leb_start %d", ubi->leb_start);
  675. /* The shift must be aligned to 32-bit boundary */
  676. if (ubi->vid_hdr_shift % 4) {
  677. ubi_err("unaligned VID header shift %d",
  678. ubi->vid_hdr_shift);
  679. return -EINVAL;
  680. }
  681. /* Check sanity */
  682. if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
  683. ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
  684. ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
  685. ubi->leb_start & (ubi->min_io_size - 1)) {
  686. ubi_err("bad VID header (%d) or data offsets (%d)",
  687. ubi->vid_hdr_offset, ubi->leb_start);
  688. return -EINVAL;
  689. }
  690. /*
  691. * Set maximum amount of physical erroneous eraseblocks to be 10%.
  692. * Erroneous PEB are those which have read errors.
  693. */
  694. ubi->max_erroneous = ubi->peb_count / 10;
  695. if (ubi->max_erroneous < 16)
  696. ubi->max_erroneous = 16;
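/* E.g. a 4096-PEB partition gets max_erroneous = 409; small ones are clamped to 16. */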
  697. dbg_gen("max_erroneous %d", ubi->max_erroneous);
  698. /*
  699. * It may happen that EC and VID headers are situated in one minimal
  700. * I/O unit. In this case we can only accept this UBI image in
  701. * read-only mode.
  702. */
  703. if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
  704. ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
  705. ubi->ro_mode = 1;
  706. }
  707. ubi->leb_size = ubi->peb_size - ubi->leb_start;
  708. if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
  709. ubi_msg("MTD device %d is write-protected, attach in read-only mode",
  710. ubi->mtd->index);
  711. ubi->ro_mode = 1;
  712. }
  713. /*
  714. * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
  715. * unfortunately, MTD does not provide this information. We should loop
  716. * over all physical eraseblocks and invoke mtd->block_is_bad() for
  717. * each physical eraseblock. So, we leave @ubi->bad_peb_count
  718. * uninitialized so far.
  719. */
  720. return 0;
  721. }
  722. /**
  723. * autoresize - re-size the volume which has the "auto-resize" flag set.
  724. * @ubi: UBI device description object
  725. * @vol_id: ID of the volume to re-size
  726. *
  727. * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
  728. * the volume table to the largest possible size. See comments in ubi-header.h
  729. * for more description of the flag. Returns zero in case of success and a
  730. * negative error code in case of failure.
  731. */
  732. static int autoresize(struct ubi_device *ubi, int vol_id)
  733. {
  734. struct ubi_volume_desc desc;
  735. struct ubi_volume *vol = ubi->volumes[vol_id];
  736. int err, old_reserved_pebs = vol->reserved_pebs;
  737. if (ubi->ro_mode) {
  738. ubi_warn("skip auto-resize because of R/O mode");
  739. return 0;
  740. }
  741. /*
  742. * Clear the auto-resize flag in the volume in-memory copy of the
  743. * volume table, and 'ubi_resize_volume()' will propagate this change
  744. * to the flash.
  745. */
  746. ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
  747. if (ubi->avail_pebs == 0) {
  748. struct ubi_vtbl_record vtbl_rec;
  749. /*
  750. * No available PEBs to re-size the volume, clear the flag on
  751. * flash and exit.
  752. */
  753. vtbl_rec = ubi->vtbl[vol_id];
  754. err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
  755. if (err)
  756. ubi_err("cannot clean auto-resize flag for volume %d",
  757. vol_id);
  758. } else {
  759. desc.vol = vol;
  760. err = ubi_resize_volume(&desc,
  761. old_reserved_pebs + ubi->avail_pebs);
  762. if (err)
  763. ubi_err("cannot auto-resize volume %d", vol_id);
  764. }
  765. if (err)
  766. return err;
  767. ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
  768. vol->name, old_reserved_pebs, vol->reserved_pebs);
  769. return 0;
  770. }
  771. /**
  772. * ubi_attach_mtd_dev - attach an MTD device.
  773. * @mtd: MTD device description object
  774. * @ubi_num: number to assign to the new UBI device
  775. * @vid_hdr_offset: VID header offset
  776. * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  777. *
  778. * This function attaches MTD device @mtd to UBI and assigns the @ubi_num number
  779. * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
  780. * which case this function finds a vacant device number and assigns it
  781. * automatically. Returns the new UBI device number in case of success and a
  782. * negative error code in case of failure.
  783. *
  784. * Note, invocations of this function have to be serialized by the
  785. * @ubi_devices_mutex.
  786. */
  787. int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
  788. int vid_hdr_offset, int max_beb_per1024)
  789. {
  790. struct ubi_device *ubi;
  791. int i, err, ref = 0;
  792. if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
  793. return -EINVAL;
  794. if (!max_beb_per1024)
  795. max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
  796. /*
  797. * Check if we already have the same MTD device attached.
  798. *
  799. * Note, this function assumes that UBI devices creations and deletions
  800. * are serialized, so it does not take the &ubi_devices_lock.
  801. */
  802. for (i = 0; i < UBI_MAX_DEVICES; i++) {
  803. ubi = ubi_devices[i];
  804. if (ubi && mtd->index == ubi->mtd->index) {
  805. ubi_err("mtd%d is already attached to ubi%d",
  806. mtd->index, i);
  807. return -EEXIST;
  808. }
  809. }
  810. /*
  811. * Make sure this MTD device is not emulated on top of an UBI volume
  812. * already. Well, generally this recursion works fine, but there are
  813. * different problems like the UBI module takes a reference to itself
  814. * by attaching (and thus, opening) the emulated MTD device. This
  815. * results in inability to unload the module. And in general it makes
  816. * no sense to attach emulated MTD devices, so we prohibit this.
  817. */
  818. if (mtd->type == MTD_UBIVOLUME) {
  819. ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
  820. mtd->index);
  821. return -EINVAL;
  822. }
  823. if (ubi_num == UBI_DEV_NUM_AUTO) {
  824. /* Search for an empty slot in the @ubi_devices array */
  825. for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
  826. if (!ubi_devices[ubi_num])
  827. break;
  828. if (ubi_num == UBI_MAX_DEVICES) {
  829. ubi_err("only %d UBI devices may be created",
  830. UBI_MAX_DEVICES);
  831. return -ENFILE;
  832. }
  833. } else {
  834. if (ubi_num >= UBI_MAX_DEVICES)
  835. return -EINVAL;
  836. /* Make sure ubi_num is not busy */
  837. if (ubi_devices[ubi_num]) {
  838. ubi_err("ubi%d already exists", ubi_num);
  839. return -EEXIST;
  840. }
  841. }
  842. ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
  843. if (!ubi)
  844. return -ENOMEM;
  845. ubi->mtd = mtd;
  846. ubi->ubi_num = ubi_num;
  847. ubi->vid_hdr_offset = vid_hdr_offset;
  848. ubi->autoresize_vol_id = -1;
  849. #ifdef CONFIG_MTD_UBI_FASTMAP
  850. ubi->fm_pool.used = ubi->fm_pool.size = 0;
  851. ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
  852. /*
  853. * fm_pool.max_size is 5% of the total number of PEBs but it's also
  854. * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
  855. */
  856. ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
  857. ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
  858. if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
  859. ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
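/*
 * Illustrative example: a device with 2048 PEBs gets a fastmap pool of
 * (2048 / 100) * 5 = 100 PEBs, subject to the clamping above.
 */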
  860. ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
  861. ubi->fm_disabled = !fm_autoconvert;
  862. if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
  863. <= UBI_FM_MAX_START) {
  864. ubi_err("More than %i PEBs are needed for fastmap, sorry.",
  865. UBI_FM_MAX_START);
  866. ubi->fm_disabled = 1;
  867. }
  868. ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
  869. ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
  870. #else
  871. ubi->fm_disabled = 1;
  872. #endif
  873. mutex_init(&ubi->buf_mutex);
  874. mutex_init(&ubi->ckvol_mutex);
  875. mutex_init(&ubi->device_mutex);
  876. spin_lock_init(&ubi->volumes_lock);
  877. mutex_init(&ubi->fm_mutex);
  878. init_rwsem(&ubi->fm_sem);
  879. ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
  880. err = io_init(ubi, max_beb_per1024);
  881. if (err)
  882. goto out_free;
  883. err = -ENOMEM;
  884. ubi->peb_buf = vmalloc(ubi->peb_size);
  885. if (!ubi->peb_buf)
  886. goto out_free;
  887. #ifdef CONFIG_MTD_UBI_FASTMAP
  888. ubi->fm_size = ubi_calc_fm_size(ubi);
  889. ubi->fm_buf = vzalloc(ubi->fm_size);
  890. if (!ubi->fm_buf)
  891. goto out_free;
  892. #endif
  893. err = ubi_attach(ubi, 0);
  894. if (err) {
  895. ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
  896. goto out_free;
  897. }
  898. if (ubi->autoresize_vol_id != -1) {
  899. err = autoresize(ubi, ubi->autoresize_vol_id);
  900. if (err)
  901. goto out_detach;
  902. }
  903. err = uif_init(ubi, &ref);
  904. if (err)
  905. goto out_detach;
  906. err = ubi_debugfs_init_dev(ubi);
  907. if (err)
  908. goto out_uif;
  909. ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
  910. if (IS_ERR(ubi->bgt_thread)) {
  911. err = PTR_ERR(ubi->bgt_thread);
  912. ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
  913. err);
  914. goto out_debugfs;
  915. }
  916. ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
  917. mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
  918. ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
  919. ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
  920. ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
  921. ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
  922. ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
  923. ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
  924. ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
  925. ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
  926. ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
  927. ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
  928. ubi->vtbl_slots);
  929. ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
  930. ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
  931. ubi->image_seq);
  932. ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
  933. ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
  934. /*
  935. * The below lock makes sure we do not race with 'ubi_thread()' which
  936. * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
  937. */
  938. spin_lock(&ubi->wl_lock);
  939. ubi->thread_enabled = 1;
  940. wake_up_process(ubi->bgt_thread);
  941. spin_unlock(&ubi->wl_lock);
  942. ubi_devices[ubi_num] = ubi;
  943. ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
  944. return ubi_num;
  945. out_debugfs:
  946. ubi_debugfs_exit_dev(ubi);
  947. out_uif:
  948. get_device(&ubi->dev);
  949. ubi_assert(ref);
  950. uif_close(ubi);
  951. out_detach:
  952. ubi_wl_close(ubi);
  953. ubi_free_internal_volumes(ubi);
  954. vfree(ubi->vtbl);
  955. out_free:
  956. vfree(ubi->peb_buf);
  957. vfree(ubi->fm_buf);
  958. if (ref)
  959. put_device(&ubi->dev);
  960. else
  961. kfree(ubi);
  962. return err;
  963. }
  964. /**
  965. * ubi_detach_mtd_dev - detach an MTD device.
  966. * @ubi_num: UBI device number to detach from
  967. * @anyway: detach MTD even if device reference count is not zero
  968. *
  969. * This function destroys an UBI device number @ubi_num and detaches the
  970. * underlying MTD device. Returns zero in case of success and %-EBUSY if the
  971. * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
  972. * exist.
  973. *
  974. * Note, invocations of this function have to be serialized by the
  975. * @ubi_devices_mutex.
  976. */
  977. int ubi_detach_mtd_dev(int ubi_num, int anyway)
  978. {
  979. struct ubi_device *ubi;
  980. if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
  981. return -EINVAL;
  982. ubi = ubi_get_device(ubi_num);
  983. if (!ubi)
  984. return -EINVAL;
  985. spin_lock(&ubi_devices_lock);
  986. put_device(&ubi->dev);
  987. ubi->ref_count -= 1;
  988. if (ubi->ref_count) {
  989. if (!anyway) {
  990. spin_unlock(&ubi_devices_lock);
  991. return -EBUSY;
  992. }
  993. /* This may only happen if there is a bug */
  994. ubi_err("%s reference count %d, destroy anyway",
  995. ubi->ubi_name, ubi->ref_count);
  996. }
  997. ubi_devices[ubi_num] = NULL;
  998. spin_unlock(&ubi_devices_lock);
  999. ubi_assert(ubi_num == ubi->ubi_num);
  1000. ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
  1001. ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
  1002. #ifdef CONFIG_MTD_UBI_FASTMAP
  1003. /* If we don't write a new fastmap at detach time we lose all
  1004. * EC updates that have been made since the last written fastmap. */
  1005. ubi_update_fastmap(ubi);
  1006. #endif
  1007. /*
  1008. * Before freeing anything, we have to stop the background thread to
  1009. * prevent it from doing anything on this device while we are freeing.
  1010. */
  1011. if (ubi->bgt_thread)
  1012. kthread_stop(ubi->bgt_thread);
  1013. /*
  1014. * Get a reference to the device in order to prevent 'dev_release()'
  1015. * from freeing the @ubi object.
  1016. */
  1017. get_device(&ubi->dev);
  1018. ubi_debugfs_exit_dev(ubi);
  1019. uif_close(ubi);
  1020. ubi_wl_close(ubi);
  1021. ubi_free_internal_volumes(ubi);
  1022. vfree(ubi->vtbl);
  1023. put_mtd_device(ubi->mtd);
  1024. vfree(ubi->peb_buf);
  1025. vfree(ubi->fm_buf);
  1026. ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
  1027. put_device(&ubi->dev);
  1028. return 0;
  1029. }
  1030. #ifndef __UBOOT__
  1031. /**
  1032. * open_mtd_by_chdev - open an MTD device by its character device node path.
  1033. * @mtd_dev: MTD character device node path
  1034. *
  1035. * This helper function opens an MTD device by its character node device path.
  1036. * Returns MTD device description object in case of success and a negative
  1037. * error code in case of failure.
  1038. */
  1039. static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
  1040. {
  1041. int err, major, minor, mode;
  1042. struct path path;
  1043. /* Probably this is an MTD character device node path */
  1044. err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
  1045. if (err)
  1046. return ERR_PTR(err);
  1047. /* MTD device number is defined by the major / minor numbers */
  1048. major = imajor(path.dentry->d_inode);
  1049. minor = iminor(path.dentry->d_inode);
  1050. mode = path.dentry->d_inode->i_mode;
  1051. path_put(&path);
  1052. if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
  1053. return ERR_PTR(-EINVAL);
  1054. if (minor & 1)
  1055. /*
  1056. * We do not think support for the "/dev/mtdrX" devices is needed,
  1057. * so we do not support them, to avoid doing extra work.
  1058. */
  1059. return ERR_PTR(-EINVAL);
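/*
 * Under the legacy mtdchar numbering each MTD device owns two minors:
 * /dev/mtdN uses minor 2*N and the read-only /dev/mtdrN uses 2*N + 1, hence
 * the odd-minor rejection above and the division by two below.
 */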
  1060. return get_mtd_device(NULL, minor / 2);
  1061. }
  1062. #endif
  1063. /**
  1064. * open_mtd_device - open MTD device by name, character device path, or number.
  1065. * @mtd_dev: name, character device node path, or MTD device device number
  1066. *
  1067. * This function tries to open an MTD device described by the @mtd_dev string,
  1068. * which is first treated as an ASCII MTD device number; if that fails, it
  1069. * is treated as an MTD device name, and if that also fails, it is treated
  1070. * as an MTD character device node path. Returns the MTD device description object in
  1071. * case of success and a negative error code in case of failure.
  1072. */
  1073. static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
  1074. {
  1075. struct mtd_info *mtd;
  1076. int mtd_num;
  1077. char *endp;
  1078. mtd_num = simple_strtoul(mtd_dev, &endp, 0);
  1079. if (*endp != '\0' || mtd_dev == endp) {
  1080. /*
  1081. * This does not look like an ASCII integer, probably this is
  1082. * MTD device name.
  1083. */
  1084. mtd = get_mtd_device_nm(mtd_dev);
  1085. #ifndef __UBOOT__
  1086. if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
  1087. /* Probably this is an MTD character device node path */
  1088. mtd = open_mtd_by_chdev(mtd_dev);
  1089. #endif
  1090. } else
  1091. mtd = get_mtd_device(NULL, mtd_num);
  1092. return mtd;
  1093. }
  1094. #ifndef __UBOOT__
  1095. static int __init ubi_init(void)
  1096. #else
  1097. int ubi_init(void)
  1098. #endif
  1099. {
  1100. int err, i, k;
  1101. /* Ensure that EC and VID headers have correct size */
  1102. BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
  1103. BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
  1104. if (mtd_devs > UBI_MAX_DEVICES) {
  1105. ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
  1106. return -EINVAL;
  1107. }
  1108. /* Create base sysfs directory and sysfs files */
  1109. ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
  1110. if (IS_ERR(ubi_class)) {
  1111. err = PTR_ERR(ubi_class);
  1112. ubi_err("cannot create UBI class");
  1113. goto out;
  1114. }
  1115. err = class_create_file(ubi_class, &ubi_version);
  1116. if (err) {
  1117. ubi_err("cannot create sysfs file");
  1118. goto out_class;
  1119. }
  1120. err = misc_register(&ubi_ctrl_cdev);
  1121. if (err) {
  1122. ubi_err("cannot register device");
  1123. goto out_version;
  1124. }
  1125. ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
  1126. sizeof(struct ubi_wl_entry),
  1127. 0, 0, NULL);
  1128. if (!ubi_wl_entry_slab) {
  1129. err = -ENOMEM;
  1130. goto out_dev_unreg;
  1131. }
  1132. err = ubi_debugfs_init();
  1133. if (err)
  1134. goto out_slab;
  1135. /* Attach MTD devices */
  1136. for (i = 0; i < mtd_devs; i++) {
  1137. struct mtd_dev_param *p = &mtd_dev_param[i];
  1138. struct mtd_info *mtd;
  1139. cond_resched();
  1140. mtd = open_mtd_device(p->name);
  1141. if (IS_ERR(mtd)) {
  1142. err = PTR_ERR(mtd);
  1143. ubi_err("cannot open mtd %s, error %d", p->name, err);
  1144. /* See comment below re-ubi_is_module(). */
  1145. if (ubi_is_module())
  1146. goto out_detach;
  1147. continue;
  1148. }
  1149. mutex_lock(&ubi_devices_mutex);
  1150. err = ubi_attach_mtd_dev(mtd, p->ubi_num,
  1151. p->vid_hdr_offs, p->max_beb_per1024);
  1152. mutex_unlock(&ubi_devices_mutex);
  1153. if (err < 0) {
  1154. ubi_err("cannot attach mtd%d", mtd->index);
  1155. put_mtd_device(mtd);
  1156. /*
  1157. * Originally UBI stopped initializing on any error.
  1158. * However, later on it was found out that this
  1159. * behavior is not very good when UBI is compiled into
  1160. * the kernel and the MTD devices to attach are passed
  1161. * through the command line. Indeed, UBI failure
  1162. * stopped whole boot sequence.
  1163. *
  1164. * To fix this, we changed the behavior for the
  1165. * non-module case, but preserved the old behavior for
  1166. * the module case, just for compatibility. This is a
  1167. * little inconsistent, though.
  1168. */
  1169. if (ubi_is_module())
  1170. goto out_detach;
  1171. }
  1172. }
  1173. err = ubiblock_init();
  1174. if (err) {
  1175. ubi_err("block: cannot initialize, error %d", err);
  1176. /* See comment above re-ubi_is_module(). */
  1177. if (ubi_is_module())
  1178. goto out_detach;
  1179. }
  1180. return 0;
  1181. out_detach:
  1182. for (k = 0; k < i; k++)
  1183. if (ubi_devices[k]) {
  1184. mutex_lock(&ubi_devices_mutex);
  1185. ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
  1186. mutex_unlock(&ubi_devices_mutex);
  1187. }
  1188. ubi_debugfs_exit();
  1189. out_slab:
  1190. kmem_cache_destroy(ubi_wl_entry_slab);
  1191. out_dev_unreg:
  1192. misc_deregister(&ubi_ctrl_cdev);
  1193. out_version:
  1194. class_remove_file(ubi_class, &ubi_version);
  1195. out_class:
  1196. class_destroy(ubi_class);
  1197. out:
  1198. ubi_err("cannot initialize UBI, error %d", err);
  1199. return err;
  1200. }
  1201. late_initcall(ubi_init);
  1202. #ifndef __UBOOT__
  1203. static void __exit ubi_exit(void)
  1204. #else
  1205. void ubi_exit(void)
  1206. #endif
  1207. {
  1208. int i;
  1209. ubiblock_exit();
  1210. for (i = 0; i < UBI_MAX_DEVICES; i++)
  1211. if (ubi_devices[i]) {
  1212. mutex_lock(&ubi_devices_mutex);
  1213. ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
  1214. mutex_unlock(&ubi_devices_mutex);
  1215. }
  1216. ubi_debugfs_exit();
  1217. kmem_cache_destroy(ubi_wl_entry_slab);
  1218. misc_deregister(&ubi_ctrl_cdev);
  1219. class_remove_file(ubi_class, &ubi_version);
  1220. class_destroy(ubi_class);
  1221. }
  1222. module_exit(ubi_exit);
  1223. /**
  1224. * bytes_str_to_int - convert a number of bytes string into an integer.
  1225. * @str: the string to convert
  1226. *
  1227. * This function returns positive resulting integer in case of success and a
  1228. * negative error code in case of failure.
  1229. */
  1230. static int __init bytes_str_to_int(const char *str)
  1231. {
  1232. char *endp;
  1233. unsigned long result;
  1234. result = simple_strtoul(str, &endp, 0);
  1235. if (str == endp || result >= INT_MAX) {
  1236. ubi_err("incorrect bytes count: \"%s\"\n", str);
  1237. return -EINVAL;
  1238. }
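/*
 * The switch below relies on intentional fall-through: a 'G' suffix multiplies
 * by 1024 three times, 'M' twice and 'K' once, so e.g. "2MiB" evaluates to
 * 2 * 1024 * 1024.
 */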
  1239. switch (*endp) {
  1240. case 'G':
  1241. result *= 1024;
  1242. case 'M':
  1243. result *= 1024;
  1244. case 'K':
  1245. result *= 1024;
  1246. if (endp[1] == 'i' && endp[2] == 'B')
  1247. endp += 2;
  1248. case '\0':
  1249. break;
  1250. default:
  1251. ubi_err("incorrect bytes count: \"%s\"\n", str);
  1252. return -EINVAL;
  1253. }
  1254. return result;
  1255. }
  1256. int kstrtoint(const char *s, unsigned int base, int *res)
  1257. {
  1258. unsigned long long tmp = simple_strtoull(s, NULL, base);
  1259. if (tmp != (unsigned long long)(int)tmp)
  1260. return -ERANGE;
  1261. *res = (int)tmp;
  1262. return 0;
  1263. }
  1264. /**
  1265. * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
  1266. * @val: the parameter value to parse
  1267. * @kp: not used
  1268. *
  1269. * This function returns zero in case of success and a negative error code in
  1270. * case of error.
  1271. */
  1272. #ifndef __UBOOT__
  1273. static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
  1274. #else
  1275. int ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
  1276. #endif
  1277. {
  1278. int i, len;
  1279. struct mtd_dev_param *p;
  1280. char buf[MTD_PARAM_LEN_MAX];
  1281. char *pbuf = &buf[0];
  1282. char *tokens[MTD_PARAM_MAX_COUNT], *token;
  1283. if (!val)
  1284. return -EINVAL;
  1285. if (mtd_devs == UBI_MAX_DEVICES) {
  1286. ubi_err("too many parameters, max. is %d\n",
  1287. UBI_MAX_DEVICES);
  1288. return -EINVAL;
  1289. }
  1290. len = strnlen(val, MTD_PARAM_LEN_MAX);
  1291. if (len == MTD_PARAM_LEN_MAX) {
  1292. ubi_err("parameter \"%s\" is too long, max. is %d\n",
  1293. val, MTD_PARAM_LEN_MAX);
  1294. return -EINVAL;
  1295. }
  1296. if (len == 0) {
  1297. pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
  1298. return 0;
  1299. }
  1300. strcpy(buf, val);
  1301. /* Get rid of the final newline */
  1302. if (buf[len - 1] == '\n')
  1303. buf[len - 1] = '\0';
  1304. for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
  1305. tokens[i] = strsep(&pbuf, ",");
  1306. if (pbuf) {
  1307. ubi_err("too many arguments at \"%s\"\n", val);
  1308. return -EINVAL;
  1309. }
  1310. p = &mtd_dev_param[mtd_devs];
  1311. strcpy(&p->name[0], tokens[0]);
  1312. token = tokens[1];
  1313. if (token) {
  1314. p->vid_hdr_offs = bytes_str_to_int(token);
  1315. if (p->vid_hdr_offs < 0)
  1316. return p->vid_hdr_offs;
  1317. }
  1318. token = tokens[2];
  1319. if (token) {
  1320. int err = kstrtoint(token, 10, &p->max_beb_per1024);
  1321. if (err) {
  1322. ubi_err("bad value for max_beb_per1024 parameter: %s",
  1323. token);
  1324. return -EINVAL;
  1325. }
  1326. }
  1327. token = tokens[3];
  1328. if (token) {
  1329. int err = kstrtoint(token, 10, &p->ubi_num);
  1330. if (err) {
  1331. ubi_err("bad value for ubi_num parameter: %s", token);
  1332. return -EINVAL;
  1333. }
  1334. } else
  1335. p->ubi_num = UBI_DEV_NUM_AUTO;
  1336. mtd_devs += 1;
  1337. return 0;
  1338. }
  1339. module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
  1340. MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
  1341. "Multiple \"mtd\" parameters may be specified.\n"
  1342. "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
  1343. "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
  1344. "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
  1345. __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
  1346. "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
  1347. "\n"
  1348. "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
  1349. "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
  1350. "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
  1351. "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.\n"
  1352. "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
  1353. #ifdef CONFIG_MTD_UBI_FASTMAP
  1354. module_param(fm_autoconvert, bool, 0644);
  1355. MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
  1356. #endif
  1357. MODULE_VERSION(__stringify(UBI_VERSION));
  1358. MODULE_DESCRIPTION("UBI - Unsorted Block Images");
  1359. MODULE_AUTHOR("Artem Bityutskiy");
  1360. MODULE_LICENSE("GPL");