cpu.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2018 NXP
  4. */
  5. #include <common.h>
  6. #include <clk.h>
  7. #include <dm.h>
  8. #include <dm/device-internal.h>
  9. #include <dm/lists.h>
  10. #include <dm/uclass.h>
  11. #include <errno.h>
  12. #include <asm/arch/sci/sci.h>
  13. #include <asm/arch/sys_proto.h>
  14. #include <asm/arch-imx/cpu.h>
  15. #include <asm/armv8/cpu.h>
  16. #include <asm/armv8/mmu.h>
  17. #include <asm/mach-imx/boot_mode.h>
  18. DECLARE_GLOBAL_DATA_PTR;
  19. u32 get_cpu_rev(void)
  20. {
  21. u32 id = 0, rev = 0;
  22. int ret;
  23. ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
  24. if (ret)
  25. return 0;
  26. rev = (id >> 5) & 0xf;
  27. id = (id & 0x1f) + MXC_SOC_IMX8; /* Dummy ID for chip */
  28. return (id << 12) | rev;
  29. }
  30. #ifdef CONFIG_DISPLAY_CPUINFO
  31. const char *get_imx8_type(u32 imxtype)
  32. {
  33. switch (imxtype) {
  34. case MXC_CPU_IMX8QXP:
  35. return "8QXP";
  36. default:
  37. return "??";
  38. }
  39. }
  40. const char *get_imx8_rev(u32 rev)
  41. {
  42. switch (rev) {
  43. case CHIP_REV_A:
  44. return "A";
  45. case CHIP_REV_B:
  46. return "B";
  47. default:
  48. return "?";
  49. }
  50. }
  51. const char *get_core_name(void)
  52. {
  53. if (is_cortex_a35())
  54. return "A35";
  55. else
  56. return "?";
  57. }
  58. int print_cpuinfo(void)
  59. {
  60. struct udevice *dev;
  61. struct clk cpu_clk;
  62. int ret;
  63. ret = uclass_get_device(UCLASS_CPU, 0, &dev);
  64. if (ret)
  65. return 0;
  66. ret = clk_get_by_index(dev, 0, &cpu_clk);
  67. if (ret) {
  68. dev_err(dev, "failed to clk\n");
  69. return 0;
  70. }
  71. u32 cpurev;
  72. cpurev = get_cpu_rev();
  73. printf("CPU: Freescale i.MX%s rev%s %s at %ld MHz\n",
  74. get_imx8_type((cpurev & 0xFF000) >> 12),
  75. get_imx8_rev((cpurev & 0xFFF)),
  76. get_core_name(),
  77. clk_get_rate(&cpu_clk) / 1000000);
  78. return 0;
  79. }
  80. #endif
  81. int print_bootinfo(void)
  82. {
  83. enum boot_device bt_dev = get_boot_device();
  84. puts("Boot: ");
  85. switch (bt_dev) {
  86. case SD1_BOOT:
  87. puts("SD0\n");
  88. break;
  89. case SD2_BOOT:
  90. puts("SD1\n");
  91. break;
  92. case SD3_BOOT:
  93. puts("SD2\n");
  94. break;
  95. case MMC1_BOOT:
  96. puts("MMC0\n");
  97. break;
  98. case MMC2_BOOT:
  99. puts("MMC1\n");
  100. break;
  101. case MMC3_BOOT:
  102. puts("MMC2\n");
  103. break;
  104. case FLEXSPI_BOOT:
  105. puts("FLEXSPI\n");
  106. break;
  107. case SATA_BOOT:
  108. puts("SATA\n");
  109. break;
  110. case NAND_BOOT:
  111. puts("NAND\n");
  112. break;
  113. case USB_BOOT:
  114. puts("USB\n");
  115. break;
  116. default:
  117. printf("Unknown device %u\n", bt_dev);
  118. break;
  119. }
  120. return 0;
  121. }
  122. enum boot_device get_boot_device(void)
  123. {
  124. enum boot_device boot_dev = SD1_BOOT;
  125. sc_rsrc_t dev_rsrc;
  126. sc_misc_get_boot_dev(-1, &dev_rsrc);
  127. switch (dev_rsrc) {
  128. case SC_R_SDHC_0:
  129. boot_dev = MMC1_BOOT;
  130. break;
  131. case SC_R_SDHC_1:
  132. boot_dev = SD2_BOOT;
  133. break;
  134. case SC_R_SDHC_2:
  135. boot_dev = SD3_BOOT;
  136. break;
  137. case SC_R_NAND:
  138. boot_dev = NAND_BOOT;
  139. break;
  140. case SC_R_FSPI_0:
  141. boot_dev = FLEXSPI_BOOT;
  142. break;
  143. case SC_R_SATA_0:
  144. boot_dev = SATA_BOOT;
  145. break;
  146. case SC_R_USB_0:
  147. case SC_R_USB_1:
  148. case SC_R_USB_2:
  149. boot_dev = USB_BOOT;
  150. break;
  151. default:
  152. break;
  153. }
  154. return boot_dev;
  155. }
  156. #ifdef CONFIG_ENV_IS_IN_MMC
/*
 * board_mmc_get_env_dev() - map a boot SDHC index to the env MMC device.
 *
 * Weak default: ignores @devno and returns the configured device.
 * Boards may override this to translate controller index to mmc devnum.
 */
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}
  161. int mmc_get_env_dev(void)
  162. {
  163. sc_rsrc_t dev_rsrc;
  164. int devno;
  165. sc_misc_get_boot_dev(-1, &dev_rsrc);
  166. switch (dev_rsrc) {
  167. case SC_R_SDHC_0:
  168. devno = 0;
  169. break;
  170. case SC_R_SDHC_1:
  171. devno = 1;
  172. break;
  173. case SC_R_SDHC_2:
  174. devno = 2;
  175. break;
  176. default:
  177. /* If not boot from sd/mmc, use default value */
  178. return CONFIG_SYS_MMC_ENV_DEV;
  179. }
  180. return board_mmc_get_env_dev(devno);
  181. }
  182. #endif
  183. #define MEMSTART_ALIGNMENT SZ_2M /* Align the memory start with 2MB */
  184. static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
  185. sc_faddr_t *addr_end)
  186. {
  187. sc_faddr_t start, end;
  188. int ret;
  189. bool owned;
  190. owned = sc_rm_is_memreg_owned(-1, mr);
  191. if (owned) {
  192. ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
  193. if (ret) {
  194. printf("Memreg get info failed, %d\n", ret);
  195. return -EINVAL;
  196. }
  197. debug("0x%llx -- 0x%llx\n", start, end);
  198. *addr_start = start;
  199. *addr_end = end;
  200. return 0;
  201. }
  202. return -EINVAL;
  203. }
/*
 * get_effective_memsize() - usable size of the bank running U-Boot.
 *
 * Scans all 64 possible SCU memory regions for the owned region that
 * contains CONFIG_SYS_TEXT_BASE within the first SDRAM window and
 * returns the span from PHYS_SDRAM_1 to that region's end, clamped to
 * PHYS_SDRAM_1_SIZE.  Falls back to the full PHYS_SDRAM_1_SIZE if no
 * matching region is found.
 */
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1;
	int err;

	/* End (exclusive) of the first fixed SDRAM window */
	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			/* Banks must start on a 2MB boundary */
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Too small memory region, not use it */
			if (start > end)
				continue;

			/* Find the memory region runs the u-boot */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			    end >= CONFIG_SYS_TEXT_BASE)) {
				/* Clamp the result to the first window */
				if ((end + 1) <=
				    ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}
/*
 * dram_init() - accumulate gd->ram_size from SCU-owned memory regions.
 *
 * Each owned region that falls inside one of the two fixed SDRAM
 * windows contributes its (2MB-aligned, window-clamped) size.  If no
 * region is found, the sum of both windows is used as default.
 */
int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	/* Ends (exclusive) of the two fixed SDRAM windows */
	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			/* Banks must start on a 2MB boundary */
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Too small memory region, not use it */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				/* Clamp the region to the first window */
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				/* Clamp the region to the second window */
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If error, set to the default value */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}

	return 0;
}
  265. static void dram_bank_sort(int current_bank)
  266. {
  267. phys_addr_t start;
  268. phys_size_t size;
  269. while (current_bank > 0) {
  270. if (gd->bd->bi_dram[current_bank - 1].start >
  271. gd->bd->bi_dram[current_bank].start) {
  272. start = gd->bd->bi_dram[current_bank - 1].start;
  273. size = gd->bd->bi_dram[current_bank - 1].size;
  274. gd->bd->bi_dram[current_bank - 1].start =
  275. gd->bd->bi_dram[current_bank].start;
  276. gd->bd->bi_dram[current_bank - 1].size =
  277. gd->bd->bi_dram[current_bank].size;
  278. gd->bd->bi_dram[current_bank].start = start;
  279. gd->bd->bi_dram[current_bank].size = size;
  280. }
  281. current_bank--;
  282. }
  283. }
/*
 * dram_init_banksize() - populate gd->bd->bi_dram[] from owned regions.
 *
 * Each SCU-owned region inside one of the two SDRAM windows becomes one
 * bank (2MB-aligned start, size clamped to the window end), kept sorted
 * by start address via dram_bank_sort().  At most CONFIG_NR_DRAM_BANKS
 * banks are recorded; if none is found, the two full windows are used
 * as defaults.
 */
int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	/* Ends (exclusive) of the two fixed SDRAM windows */
	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			/* Banks must start on a 2MB boundary */
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Small memory region, no use it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;
				/* Clamp the bank to the first window */
				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;
				/* Clamp the bank to the second window */
				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If error, set to the default value */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}
  328. static u64 get_block_attrs(sc_faddr_t addr_start)
  329. {
  330. u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
  331. PTE_BLOCK_PXN | PTE_BLOCK_UXN;
  332. if ((addr_start >= PHYS_SDRAM_1 &&
  333. addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
  334. (addr_start >= PHYS_SDRAM_2 &&
  335. addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
  336. return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);
  337. return attr;
  338. }
  339. static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
  340. {
  341. sc_faddr_t end1, end2;
  342. end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
  343. end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
  344. if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
  345. if ((addr_end + 1) > end1)
  346. return end1 - addr_start;
  347. } else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
  348. if ((addr_end + 1) > end2)
  349. return end2 - addr_start;
  350. }
  351. return (addr_end - addr_start + 1);
  352. }
#define MAX_PTE_ENTRIES 512
#define MAX_MEM_MAP_REGIONS 16

/* MMU map: one fixed device-register entry plus the owned DRAM regions */
static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

/*
 * enable_caches() - build the MMU region map and enable the caches.
 *
 * Entry 0 statically maps the peripheral space; the remaining entries
 * are filled from the SCU-owned memory regions with attributes from
 * get_block_attrs() and sizes clamped by get_block_size().  If the map
 * would overflow MAX_MEM_MAP_REGIONS, only the icache is enabled.
 */
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create map for registers access from 0x1c000000 to 0x80000000*/
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
		PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			/* Identity-map each owned region */
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		/* Zero-size entry terminates the map */
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		/* Map overflow: leave dcache off rather than mis-map */
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}
  395. #ifndef CONFIG_SYS_DCACHE_OFF
  396. u64 get_page_table_size(void)
  397. {
  398. u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
  399. u64 size = 0;
  400. /*
  401. * For each memory region, the max table size:
  402. * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
  403. */
  404. size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;
  405. /*
  406. * We need to duplicate our page table once to have an emergency pt to
  407. * resort to when splitting page tables later on
  408. */
  409. size *= 2;
  410. /*
  411. * We may need to split page tables later on if dcache settings change,
  412. * so reserve up to 4 (random pick) page tables for that.
  413. */
  414. size += one_pt * 4;
  415. return size;
  416. }
  417. #endif