cpu.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F

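/*
 * Return a pointer to the boot pass-over information left at
 * PASS_OVER_INFO_ADDR by the earlier boot stage, or NULL if the
 * barker tag or length does not match.
 */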
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

int arch_cpu_init(void)
{
	struct pass_over_info_t *pass_over = get_pass_over_info();

	if (pass_over && pass_over->g_ap_mu == 0) {
		/*
		 * When ap_mu is 0, U-Boot was booted
		 * from the first container
		 */
		sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
	}

	return 0;
}

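/*
 * Bind and probe the SCU (system controller) driver on the
 * "fsl,imx8-mu" node so that SCU API calls are available to
 * the rest of U-Boot.
 */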
int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");

	ret = device_bind_driver_to_node(gd->dm_root, "imx8_scu", "imx8_scu",
					 offset_to_ofnode(node), &devp);
	if (ret) {
		printf("could not find scu %d\n", ret);
		return ret;
	}

	ret = device_probe(devp);
	if (ret) {
		printf("scu probe failed %d\n", ret);
		return ret;
	}

	return 0;
}

int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot: ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

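/* Map the boot resource reported by the SCU to a boot_device value */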
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;
	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

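/*
 * Derive the MMC device number holding the environment from the
 * SDHC controller the SCU reports as the boot source.
 */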
int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* If not booting from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT	SZ_2M /* Align the memory start to 2MB */

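/*
 * Query the SCU resource manager for memory region @mr and, on
 * success, return its start and end addresses. Only regions owned
 * by this partition are reported.
 */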
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

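/*
 * Return the usable size of the first DRAM bank: from PHYS_SDRAM_1 up
 * to the end of the owned region that contains the running U-Boot
 * image, capped at PHYS_SDRAM_1_SIZE.
 */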
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Too small a memory region, do not use it */
			if (start > end)
				continue;

			/* Find the memory region that runs U-Boot */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			    end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}

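/* Accumulate gd->ram_size from all owned regions inside the two DRAM banks */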
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Too small a memory region, do not use it */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If nothing was found, fall back to the default sizes */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

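/*
 * Keep gd->bd->bi_dram[] ordered by start address by moving the
 * newly added bank towards the front of the array.
 */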
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

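/* Fill gd->bd->bi_dram[] with the owned regions, clamped to the DRAM banks */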
int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Too small a region, do not use it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If no banks were found, fall back to the default layout */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

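/*
 * Return MMU attributes for a block: normal cacheable memory for
 * addresses inside the DRAM banks, device (nGnRnE, non-executable)
 * memory otherwise.
 */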
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	if ((addr_start >= PHYS_SDRAM_1 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
	    (addr_start >= PHYS_SDRAM_2 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

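/* Return the block size, clamped so it does not cross the end of a DRAM bank */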
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES		512
#define MAX_MEM_MAP_REGIONS	16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

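/*
 * Build imx8_mem_map at run time: one device-memory entry for the
 * register space plus one entry per owned DRAM region, then turn on
 * the instruction and data caches.
 */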
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
		PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] virt = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#ifndef CONFIG_SYS_DCACHE_OFF
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#define FUSE_MAC0_WORD0		708
#define FUSE_MAC0_WORD1		709
#define FUSE_MAC1_WORD0		710
#define FUSE_MAC1_WORD1		711

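/* Read the MAC address for @dev_id from the OTP fuse words via the SCU */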
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

#if CONFIG_IS_ENABLED(CPU)
struct cpu_imx_platdata {
	const char *name;
	const char *rev;
	const char *type;
	u32 cpurev;
	u32 freq_mhz;
};

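/*
 * Read the SoC ID and silicon revision from the SCU and encode them as
 * (type << 12) | revision, returning 0 on failure.
 */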
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */

	return (id << 12) | rev;
}

const char *get_imx8_type(u32 imxtype)
{
	switch (imxtype) {
	case MXC_CPU_IMX8QXP:
	case MXC_CPU_IMX8QXP_A0:
		return "QXP";
	default:
		return "??";
	}
}

const char *get_imx8_rev(u32 rev)
{
	switch (rev) {
	case CHIP_REV_A:
		return "A";
	case CHIP_REV_B:
		return "B";
	default:
		return "?";
	}
}

const char *get_core_name(void)
{
	if (is_cortex_a35())
		return "A35";
	else if (is_cortex_a53())
		return "A53";
	else if (is_cortex_a72())
		return "A72";
	else
		return "?";
}

int cpu_imx_get_desc(struct udevice *dev, char *buf, int size)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);

	if (size < 100)
		return -ENOSPC;

	snprintf(buf, size, "CPU: Freescale i.MX8%s Rev%s %s at %u MHz\n",
		 plat->type, plat->rev, plat->name, plat->freq_mhz);

	return 0;
}

static int cpu_imx_get_info(struct udevice *dev, struct cpu_info *info)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);

	info->cpu_freq = plat->freq_mhz * 1000;
	info->features = BIT(CPU_FEAT_L1_CACHE) | BIT(CPU_FEAT_MMU);

	return 0;
}

static int cpu_imx_get_count(struct udevice *dev)
{
	return 4;
}

static int cpu_imx_get_vendor(struct udevice *dev, char *buf, int size)
{
	snprintf(buf, size, "NXP");
	return 0;
}

static const struct cpu_ops cpu_imx8_ops = {
	.get_desc	= cpu_imx_get_desc,
	.get_info	= cpu_imx_get_info,
	.get_count	= cpu_imx_get_count,
	.get_vendor	= cpu_imx_get_vendor,
};

static const struct udevice_id cpu_imx8_ids[] = {
	{ .compatible = "arm,cortex-a35" },
	{ }
};

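/* Fill the CPU platdata from the SoC revision and the CPU core clock */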
static int imx8_cpu_probe(struct udevice *dev)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);
	struct clk cpu_clk;
	u32 cpurev;
	int ret;

	cpurev = get_cpu_rev();
	plat->cpurev = cpurev;
	plat->name = get_core_name();
	plat->rev = get_imx8_rev(cpurev & 0xFFF);
	plat->type = get_imx8_type((cpurev & 0xFF000) >> 12);

	ret = clk_get_by_index(dev, 0, &cpu_clk);
	if (ret) {
		debug("%s: Failed to get CPU clk: %d\n", __func__, ret);
		return 0;
	}

	plat->freq_mhz = clk_get_rate(&cpu_clk) / 1000000;
	return 0;
}

U_BOOT_DRIVER(cpu_imx8_drv) = {
	.name			= "imx8x_cpu",
	.id			= UCLASS_CPU,
	.of_match		= cpu_imx8_ids,
	.ops			= &cpu_imx8_ops,
	.probe			= imx8_cpu_probe,
	.platdata_auto_alloc_size = sizeof(struct cpu_imx_platdata),
	.flags			= DM_FLAG_PRE_RELOC,
};
#endif