device.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Device manager
 *
 * Copyright (c) 2013 Google, Inc
 *
 * (C) Copyright 2012
 * Pavel Herrmann <morpheus.ibis@gmail.com>
 */

#include <common.h>
#include <asm/io.h>
#include <clk.h>
#include <fdtdec.h>
#include <fdt_support.h>
#include <malloc.h>
#include <dm/device.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/of_access.h>
#include <dm/pinctrl.h>
#include <dm/platdata.h>
#include <dm/read.h>
#include <dm/uclass.h>
#include <dm/uclass-internal.h>
#include <dm/util.h>
#include <linux/err.h>
#include <linux/list.h>
#include <power-domain.h>

DECLARE_GLOBAL_DATA_PTR;

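/**
 * device_bind_common() - Create a device and bind it to a driver and uclass
 *
 * Common implementation behind device_bind(), device_bind_ofnode(),
 * device_bind_with_driver_data() and device_bind_by_name(): allocate the
 * udevice, hook it into its uclass and parent, allocate any platform data
 * requested by the driver, uclass or parent, then run the bind() and
 * post_bind() methods. On failure the allocations are unwound in reverse
 * order.
 *
 * @parent: Parent device, or NULL for the root device
 * @drv: Driver to bind the device to
 * @name: Name of the new device
 * @platdata: Pre-allocated platform data, or NULL to allocate it here
 * @driver_data: Driver-specific data associated with the device
 * @node: Device tree node for the device, or ofnode_null() if none
 * @of_platdata_size: Size of platform data provided via OF_PLATDATA, or 0
 * @devp: If non-NULL, returns the newly bound device
 */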
static int device_bind_common(struct udevice *parent, const struct driver *drv,
			      const char *name, void *platdata,
			      ulong driver_data, ofnode node,
			      uint of_platdata_size, struct udevice **devp)
{
	struct udevice *dev;
	struct uclass *uc;
	int size, ret = 0;

	if (devp)
		*devp = NULL;
	if (!name)
		return -EINVAL;
	ret = uclass_get(drv->id, &uc);
	if (ret) {
		debug("Missing uclass for driver %s\n", drv->name);
		return ret;
	}

	dev = calloc(1, sizeof(struct udevice));
	if (!dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->sibling_node);
	INIT_LIST_HEAD(&dev->child_head);
	INIT_LIST_HEAD(&dev->uclass_node);
#ifdef CONFIG_DEVRES
	INIT_LIST_HEAD(&dev->devres_head);
#endif
	dev->platdata = platdata;
	dev->driver_data = driver_data;
	dev->name = name;
	dev->node = node;
	dev->parent = parent;
	dev->driver = drv;
	dev->uclass = uc;

	dev->seq = -1;
	dev->req_seq = -1;
	if (CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DM_SEQ_ALIAS)) {
		/*
		 * Some devices, such as a SPI bus, I2C bus and serial ports
		 * are numbered using aliases.
		 *
		 * This is just a 'requested' sequence, and will be
		 * resolved (and ->seq updated) when the device is probed.
		 */
		if (uc->uc_drv->flags & DM_UC_FLAG_SEQ_ALIAS) {
			if (uc->uc_drv->name && ofnode_valid(node)) {
				dev_read_alias_seq(dev, &dev->req_seq);
			}
		}
	}

	if (drv->platdata_auto_alloc_size) {
		bool alloc = !platdata;

		if (CONFIG_IS_ENABLED(OF_PLATDATA)) {
			if (of_platdata_size) {
				dev->flags |= DM_FLAG_OF_PLATDATA;
				if (of_platdata_size <
				    drv->platdata_auto_alloc_size)
					alloc = true;
			}
		}
		if (alloc) {
			dev->flags |= DM_FLAG_ALLOC_PDATA;
			dev->platdata = calloc(1,
					       drv->platdata_auto_alloc_size);
			if (!dev->platdata) {
				ret = -ENOMEM;
				goto fail_alloc1;
			}
			if (CONFIG_IS_ENABLED(OF_PLATDATA) && platdata) {
				memcpy(dev->platdata, platdata,
				       of_platdata_size);
			}
		}
	}

	size = uc->uc_drv->per_device_platdata_auto_alloc_size;
	if (size) {
		dev->flags |= DM_FLAG_ALLOC_UCLASS_PDATA;
		dev->uclass_platdata = calloc(1, size);
		if (!dev->uclass_platdata) {
			ret = -ENOMEM;
			goto fail_alloc2;
		}
	}

	if (parent) {
		size = parent->driver->per_child_platdata_auto_alloc_size;
		if (!size) {
			size = parent->uclass->uc_drv->
					per_child_platdata_auto_alloc_size;
		}
		if (size) {
			dev->flags |= DM_FLAG_ALLOC_PARENT_PDATA;
			dev->parent_platdata = calloc(1, size);
			if (!dev->parent_platdata) {
				ret = -ENOMEM;
				goto fail_alloc3;
			}
		}
	}

	/* put dev into parent's successor list */
	if (parent)
		list_add_tail(&dev->sibling_node, &parent->child_head);

	ret = uclass_bind_device(dev);
	if (ret)
		goto fail_uclass_bind;

	/* if we fail to bind we remove device from successors and free it */
	if (drv->bind) {
		ret = drv->bind(dev);
		if (ret)
			goto fail_bind;
	}
	if (parent && parent->driver->child_post_bind) {
		ret = parent->driver->child_post_bind(dev);
		if (ret)
			goto fail_child_post_bind;
	}
	if (uc->uc_drv->post_bind) {
		ret = uc->uc_drv->post_bind(dev);
		if (ret)
			goto fail_uclass_post_bind;
	}

	if (parent)
		pr_debug("Bound device %s to %s\n", dev->name, parent->name);
	if (devp)
		*devp = dev;

	dev->flags |= DM_FLAG_BOUND;

	return 0;

fail_uclass_post_bind:
	/* There is no child unbind() method, so no clean-up required */
fail_child_post_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (drv->unbind && drv->unbind(dev)) {
			dm_warn("unbind() method failed on dev '%s' on error path\n",
				dev->name);
		}
	}

fail_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		if (uclass_unbind_device(dev)) {
			dm_warn("Failed to unbind dev '%s' on error path\n",
				dev->name);
		}
	}
fail_uclass_bind:
	if (CONFIG_IS_ENABLED(DM_DEVICE_REMOVE)) {
		list_del(&dev->sibling_node);
		if (dev->flags & DM_FLAG_ALLOC_PARENT_PDATA) {
			free(dev->parent_platdata);
			dev->parent_platdata = NULL;
		}
	}
fail_alloc3:
	if (dev->flags & DM_FLAG_ALLOC_UCLASS_PDATA) {
		free(dev->uclass_platdata);
		dev->uclass_platdata = NULL;
	}
fail_alloc2:
	if (dev->flags & DM_FLAG_ALLOC_PDATA) {
		free(dev->platdata);
		dev->platdata = NULL;
	}
fail_alloc1:
	devres_release_all(dev);

	free(dev);

	return ret;
}

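/*
 * The wrappers below cover the usual ways of binding a device: by ofnode,
 * by flat-tree offset, with driver_data from a device tree match, or from a
 * U_BOOT_DEVICE() driver_info entry. As a minimal sketch (the driver name
 * "mydev" and the bus bind method are hypothetical, not part of this file),
 * a bus driver might bind one child per device tree subnode like this:
 *
 *	static int mybus_bind(struct udevice *bus)
 *	{
 *		ofnode node;
 *		int ret;
 *
 *		dev_for_each_subnode(node, bus) {
 *			struct udevice *child;
 *
 *			ret = device_bind_ofnode(bus, DM_GET_DRIVER(mydev),
 *						 ofnode_get_name(node), NULL,
 *						 node, &child);
 *			if (ret)
 *				return ret;
 *		}
 *
 *		return 0;
 *	}
 */
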
int device_bind_with_driver_data(struct udevice *parent,
				 const struct driver *drv, const char *name,
				 ulong driver_data, ofnode node,
				 struct udevice **devp)
{
	return device_bind_common(parent, drv, name, NULL, driver_data, node,
				  0, devp);
}

int device_bind(struct udevice *parent, const struct driver *drv,
		const char *name, void *platdata, int of_offset,
		struct udevice **devp)
{
	return device_bind_common(parent, drv, name, platdata, 0,
				  offset_to_ofnode(of_offset), 0, devp);
}

int device_bind_ofnode(struct udevice *parent, const struct driver *drv,
		       const char *name, void *platdata, ofnode node,
		       struct udevice **devp)
{
	return device_bind_common(parent, drv, name, platdata, 0, node, 0,
				  devp);
}

int device_bind_by_name(struct udevice *parent, bool pre_reloc_only,
			const struct driver_info *info, struct udevice **devp)
{
	struct driver *drv;
	uint platdata_size = 0;

	drv = lists_driver_lookup_name(info->name);
	if (!drv)
		return -ENOENT;
	if (pre_reloc_only && !(drv->flags & DM_FLAG_PRE_RELOC))
		return -EPERM;

#if CONFIG_IS_ENABLED(OF_PLATDATA)
	platdata_size = info->platdata_size;
#endif
	return device_bind_common(parent, drv, info->name,
				  (void *)info->platdata, 0, ofnode_null(),
				  platdata_size, devp);
}

static void *alloc_priv(int size, uint flags)
{
	void *priv;

	if (flags & DM_FLAG_ALLOC_PRIV_DMA) {
		size = ROUND(size, ARCH_DMA_MINALIGN);
		priv = memalign(ARCH_DMA_MINALIGN, size);
		if (priv) {
			memset(priv, '\0', size);

			/*
			 * Ensure that the zero bytes are flushed to memory.
			 * This prevents problems if the driver uses this as
			 * both an input and an output buffer:
			 *
			 * 1. Zeroes written to buffer (here) and sit in the
			 *    cache
			 * 2. Driver issues a read command to DMA
			 * 3. CPU runs out of cache space and evicts some cache
			 *    data in the buffer, writing zeroes to RAM from
			 *    the memset() above
			 * 4. DMA completes
			 * 5. Buffer now has some DMA data and some zeroes
			 * 6. Data being read is now incorrect
			 *
			 * To prevent this, ensure that the cache is clean
			 * within this range at the start. The driver can then
			 * use normal flush-after-write, invalidate-before-read
			 * procedures.
			 *
			 * TODO(sjg@chromium.org): Drop this microblaze
			 * exception.
			 */
#ifndef CONFIG_MICROBLAZE
			flush_dcache_range((ulong)priv, (ulong)priv + size);
#endif
		}
	} else {
		priv = calloc(1, size);
	}

	return priv;
}

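/*
 * device_probe() activates a device so it is ready for use. Note the
 * ordering below: private data and the parent device come first, then the
 * sequence number, pinctrl and power-domain setup, the pre-probe hooks,
 * ofdata_to_platdata(), the driver's probe() method and finally the uclass
 * post-probe hook. Failures unwind through device_remove()/device_free().
 */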
int device_probe(struct udevice *dev)
{
	struct power_domain pd;
	const struct driver *drv;
	int size = 0;
	int ret;
	int seq;

	if (!dev)
		return -EINVAL;

	if (dev->flags & DM_FLAG_ACTIVATED)
		return 0;

	drv = dev->driver;
	assert(drv);

	/* Allocate private data if requested and not reentered */
	if (drv->priv_auto_alloc_size && !dev->priv) {
		dev->priv = alloc_priv(drv->priv_auto_alloc_size, drv->flags);
		if (!dev->priv) {
			ret = -ENOMEM;
			goto fail;
		}
	}
	/* Allocate uclass private data if requested and not reentered */
	size = dev->uclass->uc_drv->per_device_auto_alloc_size;
	if (size && !dev->uclass_priv) {
		dev->uclass_priv = alloc_priv(size,
					      dev->uclass->uc_drv->flags);
		if (!dev->uclass_priv) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	/* Ensure all parents are probed */
	if (dev->parent) {
		size = dev->parent->driver->per_child_auto_alloc_size;
		if (!size) {
			size = dev->parent->uclass->uc_drv->
					per_child_auto_alloc_size;
		}
		if (size && !dev->parent_priv) {
			dev->parent_priv = alloc_priv(size, drv->flags);
			if (!dev->parent_priv) {
				ret = -ENOMEM;
				goto fail;
			}
		}

		ret = device_probe(dev->parent);
		if (ret)
			goto fail;

		/*
		 * The device might have already been probed during
		 * the call to device_probe() on its parent device
		 * (e.g. PCI bridge devices). Test the flags again
		 * so that we don't mess up the device.
		 */
		if (dev->flags & DM_FLAG_ACTIVATED)
			return 0;
	}

	seq = uclass_resolve_seq(dev);
	if (seq < 0) {
		ret = seq;
		goto fail;
	}
	dev->seq = seq;

	dev->flags |= DM_FLAG_ACTIVATED;

	/*
	 * Process pinctrl for everything except the root device, and
	 * continue regardless of the result of pinctrl. Don't process pinctrl
	 * settings for pinctrl devices since the device may not yet be
	 * probed.
	 */
	if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL)
		pinctrl_select_state(dev, "default");

	if (dev->parent && device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) {
		if (!power_domain_get(dev, &pd))
			power_domain_on(&pd);
	}

	ret = uclass_pre_probe_device(dev);
	if (ret)
		goto fail;

	if (dev->parent && dev->parent->driver->child_pre_probe) {
		ret = dev->parent->driver->child_pre_probe(dev);
		if (ret)
			goto fail;
	}

	if (drv->ofdata_to_platdata && dev_has_of_node(dev)) {
		ret = drv->ofdata_to_platdata(dev);
		if (ret)
			goto fail;
	}

	/* Process 'assigned-{clocks/clock-parents/clock-rates}' properties */
	ret = clk_set_defaults(dev);
	if (ret)
		goto fail;

	if (drv->probe) {
		ret = drv->probe(dev);
		if (ret) {
			dev->flags &= ~DM_FLAG_ACTIVATED;
			goto fail;
		}
	}

	ret = uclass_post_probe_device(dev);
	if (ret)
		goto fail_uclass;

	if (dev->parent && device_get_uclass_id(dev) == UCLASS_PINCTRL)
		pinctrl_select_state(dev, "default");

	return 0;
fail_uclass:
	if (device_remove(dev, DM_REMOVE_NORMAL)) {
		dm_warn("%s: Device '%s' failed to remove on error path\n",
			__func__, dev->name);
	}
fail:
	dev->flags &= ~DM_FLAG_ACTIVATED;

	dev->seq = -1;
	device_free(dev);

	return ret;
}

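/*
 * The accessors below return the data regions attached to a device. The
 * platdata regions are available once the device is bound; priv,
 * uclass_priv and parent_priv are only allocated by device_probe(), so
 * callers normally use the device_get_*() helpers (which probe on demand)
 * or check device_active() before relying on them.
 */
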
void *dev_get_platdata(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->platdata;
}

void *dev_get_parent_platdata(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->parent_platdata;
}

void *dev_get_uclass_platdata(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->uclass_platdata;
}

void *dev_get_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->priv;
}

void *dev_get_uclass_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->uclass_priv;
}

void *dev_get_parent_priv(const struct udevice *dev)
{
	if (!dev) {
		dm_warn("%s: null device\n", __func__);
		return NULL;
	}

	return dev->parent_priv;
}

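/*
 * device_get_device_tail() implements the convention used by the lookup
 * functions below: a device_find_*() variant only locates a bound device,
 * while the matching device_get_*() variant also probes it before handing
 * it back, so the caller receives a device that is ready for use.
 */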
static int device_get_device_tail(struct udevice *dev, int ret,
				  struct udevice **devp)
{
	if (ret)
		return ret;

	ret = device_probe(dev);
	if (ret)
		return ret;

	*devp = dev;

	return 0;
}

/**
 * device_find_by_ofnode() - Return device associated with given ofnode
 *
 * The returned device is *not* activated.
 *
 * @node: The ofnode for which an associated device should be looked up
 * @devp: Pointer to structure to hold the found device
 * Return: 0 if OK, -ve on error
 */
static int device_find_by_ofnode(ofnode node, struct udevice **devp)
{
	struct uclass *uc;
	struct udevice *dev;
	int ret;

	list_for_each_entry(uc, &gd->uclass_root, sibling_node) {
		ret = uclass_find_device_by_ofnode(uc->uc_drv->id, node,
						   &dev);
		if (!ret || dev) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child(struct udevice *parent, int index, struct udevice **devp)
{
	struct udevice *dev;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!index--)
			return device_get_device_tail(dev, 0, devp);
	}

	return -ENODEV;
}

int device_find_child_by_seq(struct udevice *parent, int seq_or_req_seq,
			     bool find_req_seq, struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	if (seq_or_req_seq == -1)
		return -ENODEV;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if ((find_req_seq ? dev->req_seq : dev->seq) ==
				seq_or_req_seq) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child_by_seq(struct udevice *parent, int seq,
			    struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	*devp = NULL;
	ret = device_find_child_by_seq(parent, seq, false, &dev);
	if (ret == -ENODEV) {
		/*
		 * We didn't find it in probed devices. See if there is one
		 * that will request this seq if probed.
		 */
		ret = device_find_child_by_seq(parent, seq, true, &dev);
	}
	return device_get_device_tail(dev, ret, devp);
}

int device_find_child_by_of_offset(struct udevice *parent, int of_offset,
				   struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (dev_of_offset(dev) == of_offset) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

int device_get_child_by_of_offset(struct udevice *parent, int node,
				  struct udevice **devp)
{
	struct udevice *dev;
	int ret;

	*devp = NULL;
	ret = device_find_child_by_of_offset(parent, node, &dev);
	return device_get_device_tail(dev, ret, devp);
}

static struct udevice *_device_find_global_by_ofnode(struct udevice *parent,
						     ofnode ofnode)
{
	struct udevice *dev, *found;

	if (ofnode_equal(dev_ofnode(parent), ofnode))
		return parent;

	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		found = _device_find_global_by_ofnode(dev, ofnode);
		if (found)
			return found;
	}

	return NULL;
}

int device_find_global_by_ofnode(ofnode ofnode, struct udevice **devp)
{
	*devp = _device_find_global_by_ofnode(gd->dm_root, ofnode);

	return *devp ? 0 : -ENOENT;
}

int device_get_global_by_ofnode(ofnode ofnode, struct udevice **devp)
{
	struct udevice *dev;

	dev = _device_find_global_by_ofnode(gd->dm_root, ofnode);

	return device_get_device_tail(dev, dev ? 0 : -ENOENT, devp);
}

int device_find_first_child(struct udevice *parent, struct udevice **devp)
{
	if (list_empty(&parent->child_head)) {
		*devp = NULL;
	} else {
		*devp = list_first_entry(&parent->child_head, struct udevice,
					 sibling_node);
	}

	return 0;
}

int device_find_next_child(struct udevice **devp)
{
	struct udevice *dev = *devp;
	struct udevice *parent = dev->parent;

	if (list_is_last(&dev->sibling_node, &parent->child_head)) {
		*devp = NULL;
	} else {
		*devp = list_entry(dev->sibling_node.next, struct udevice,
				   sibling_node);
	}

	return 0;
}

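/*
 * A minimal sketch of walking a device's children with the two iterators
 * above (obtaining "bus" via uclass_get_device() is an assumption for
 * illustration, not something this file requires):
 *
 *	struct udevice *bus, *child;
 *
 *	if (uclass_get_device(UCLASS_I2C, 0, &bus))
 *		return;
 *	for (device_find_first_child(bus, &child);
 *	     child;
 *	     device_find_next_child(&child))
 *		printf("child: %s\n", child->name);
 */
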
int device_find_first_inactive_child(struct udevice *parent,
				     enum uclass_id uclass_id,
				     struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	list_for_each_entry(dev, &parent->child_head, sibling_node) {
		if (!device_active(dev) &&
		    device_get_uclass_id(dev) == uclass_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENODEV;
}

struct udevice *dev_get_parent(const struct udevice *child)
{
	return child->parent;
}

ulong dev_get_driver_data(const struct udevice *dev)
{
	return dev->driver_data;
}

const void *dev_get_driver_ops(const struct udevice *dev)
{
	if (!dev || !dev->driver->ops)
		return NULL;

	return dev->driver->ops;
}

enum uclass_id device_get_uclass_id(const struct udevice *dev)
{
	return dev->uclass->uc_drv->id;
}

const char *dev_get_uclass_name(const struct udevice *dev)
{
	if (!dev)
		return NULL;

	return dev->uclass->uc_drv->name;
}

bool device_has_children(const struct udevice *dev)
{
	return !list_empty(&dev->child_head);
}

bool device_has_active_children(struct udevice *dev)
{
	struct udevice *child;

	for (device_find_first_child(dev, &child);
	     child;
	     device_find_next_child(&child)) {
		if (device_active(child))
			return true;
	}

	return false;
}

bool device_is_last_sibling(struct udevice *dev)
{
	struct udevice *parent = dev->parent;

	if (!parent)
		return false;
	return list_is_last(&dev->sibling_node, &parent->child_head);
}

void device_set_name_alloced(struct udevice *dev)
{
	dev->flags |= DM_FLAG_NAME_ALLOCED;
}

int device_set_name(struct udevice *dev, const char *name)
{
	name = strdup(name);
	if (!name)
		return -ENOMEM;
	dev->name = name;
	device_set_name_alloced(dev);

	return 0;
}

bool device_is_compatible(struct udevice *dev, const char *compat)
{
	return ofnode_device_is_compatible(dev_ofnode(dev), compat);
}

bool of_machine_is_compatible(const char *compat)
{
	const void *fdt = gd->fdt_blob;

	return !fdt_node_check_compatible(fdt, 0, compat);
}

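/*
 * The two helpers below enable or disable a device by its device tree path
 * at run time. Both require a live device tree (they return -ENOSYS
 * otherwise): disabling removes and unbinds the existing device before
 * marking the node disabled, while enabling marks the node enabled and
 * re-binds it under its parent via lists_bind_fdt().
 */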
int dev_disable_by_path(const char *path)
{
	struct uclass *uc;
	ofnode node = ofnode_path(path);
	struct udevice *dev;
	int ret = 1;

	if (!of_live_active())
		return -ENOSYS;

	list_for_each_entry(uc, &gd->uclass_root, sibling_node) {
		ret = uclass_find_device_by_ofnode(uc->uc_drv->id, node, &dev);
		if (!ret)
			break;
	}

	if (ret)
		return ret;

	ret = device_remove(dev, DM_REMOVE_NORMAL);
	if (ret)
		return ret;

	ret = device_unbind(dev);
	if (ret)
		return ret;

	return ofnode_set_enabled(node, false);
}

int dev_enable_by_path(const char *path)
{
	ofnode node = ofnode_path(path);
	ofnode pnode = ofnode_get_parent(node);
	struct udevice *parent;
	int ret = 1;

	if (!of_live_active())
		return -ENOSYS;

	ret = device_find_by_ofnode(pnode, &parent);
	if (ret)
		return ret;

	ret = ofnode_set_enabled(node, true);
	if (ret)
		return ret;

	return lists_bind_fdt(parent, node, NULL);
}