  1. /*
  2. * Copyright 2014-2015 Freescale Semiconductor, Inc.
  3. *
  4. * SPDX-License-Identifier: GPL-2.0+
  5. */
  6. #include <common.h>
  7. #include <asm/io.h>
  8. #include <linux/errno.h>
  9. #include <asm/system.h>
  10. #include <asm/armv8/mmu.h>
  11. #include <asm/io.h>
  12. #include <asm/arch/fsl_serdes.h>
  13. #include <asm/arch/soc.h>
  14. #include <asm/arch/cpu.h>
  15. #include <asm/arch/speed.h>
  16. #ifdef CONFIG_MP
  17. #include <asm/arch/mp.h>
  18. #endif
  19. #include <efi_loader.h>
  20. #include <fm_eth.h>
  21. #include <fsl-mc/fsl_mc.h>
  22. #ifdef CONFIG_FSL_ESDHC
  23. #include <fsl_esdhc.h>
  24. #endif
  25. #ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
  26. #include <asm/armv8/sec_firmware.h>
  27. #endif
DECLARE_GLOBAL_DATA_PTR;

/*
 * Active MMU region map consumed by the generic ARMv8 MMU code.
 * Starts as the early (SRAM-based) map; final_mmu_setup() switches it
 * to final_map once DRAM is available.
 */
struct mm_region *mem_map = early_map;
  30. void cpu_name(char *name)
  31. {
  32. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  33. unsigned int i, svr, ver;
  34. svr = gur_in32(&gur->svr);
  35. ver = SVR_SOC_VER(svr);
  36. for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
  37. if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
  38. strcpy(name, cpu_type_list[i].name);
  39. if (IS_E_PROCESSOR(svr))
  40. strcat(name, "E");
  41. sprintf(name + strlen(name), " Rev%d.%d",
  42. SVR_MAJ(svr), SVR_MIN(svr));
  43. break;
  44. }
  45. if (i == ARRAY_SIZE(cpu_type_list))
  46. strcpy(name, "unknown");
  47. }
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24
 * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purpose.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/*
	 * Point TTBR to the new table.  The TCR ORGN/IRGN fields are
	 * masked off so table walks are done without cacheability
	 * attributes — the D-cache is not enabled at this point.
	 */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	/* Enable the MMU (CR_M); the D-cache is enabled separately */
	set_sctlr(get_sctlr() | CR_M);
}
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will be not used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	int index;
#endif

	/* Switch from the early (SRAM) region map to the final one */
	mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			/* low bits of secure_ram carry state flags; mask them */
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/*
	 * Create emergency page tables immediately after the normal
	 * ones: they start where the first setup_pgtables() run stopped
	 * filling (tlb_fillptr), then tlb_addr is restored.
	 */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/*
	 * EL3 MMU is already enabled, just need to invalidate TLB to load the
	 * new table. The new table is compatible with the current table, if
	 * MMU somehow walks through the new table before invalidation TLB,
	 * it still works. So we don't need to turn off MMU here.
	 * When EL2 MMU table is created by calling this function, MMU needs
	 * to be enabled.
	 */
	set_sctlr(get_sctlr() | CR_M);
}
  138. u64 get_page_table_size(void)
  139. {
  140. return 0x10000;
  141. }
/*
 * Early CPU init: turn on the I-cache right away, invalidate stale
 * D-cache/TLB contents, bring up the MMU from SRAM-resident tables,
 * then enable the D-cache.  Order matters; do not reorder.
 */
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	/* MMU is up; now it is safe to enable the D-cache */
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}
/* Hook for the generic ARMv8 cache code: build the final MMU tables */
void mmu_setup(void)
{
	final_mmu_setup();
}
/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	/* Rebuild tables in DRAM, drop stale TLB entries, re-enable caches */
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
  166. #endif
  167. u32 initiator_type(u32 cluster, int init_id)
  168. {
  169. struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  170. u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
  171. u32 type = 0;
  172. type = gur_in32(&gur->tp_ityp[idx]);
  173. if (type & TP_ITYP_AV)
  174. return type;
  175. return 0;
  176. }
  177. u32 cpu_pos_mask(void)
  178. {
  179. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  180. int i = 0;
  181. u32 cluster, type, mask = 0;
  182. do {
  183. int j;
  184. cluster = gur_in32(&gur->tp_cluster[i].lower);
  185. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  186. type = initiator_type(cluster, j);
  187. if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
  188. mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
  189. }
  190. i++;
  191. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  192. return mask;
  193. }
  194. u32 cpu_mask(void)
  195. {
  196. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  197. int i = 0, count = 0;
  198. u32 cluster, type, mask = 0;
  199. do {
  200. int j;
  201. cluster = gur_in32(&gur->tp_cluster[i].lower);
  202. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  203. type = initiator_type(cluster, j);
  204. if (type) {
  205. if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
  206. mask |= 1 << count;
  207. count++;
  208. }
  209. }
  210. i++;
  211. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  212. return mask;
  213. }
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	/* population count of the ARM-core bitmap */
	return hweight32(cpu_mask());
}
  221. int fsl_qoriq_core_to_cluster(unsigned int core)
  222. {
  223. struct ccsr_gur __iomem *gur =
  224. (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
  225. int i = 0, count = 0;
  226. u32 cluster;
  227. do {
  228. int j;
  229. cluster = gur_in32(&gur->tp_cluster[i].lower);
  230. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  231. if (initiator_type(cluster, j)) {
  232. if (count == core)
  233. return i;
  234. count++;
  235. }
  236. }
  237. i++;
  238. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  239. return -1; /* cannot identify the cluster */
  240. }
  241. u32 fsl_qoriq_core_to_type(unsigned int core)
  242. {
  243. struct ccsr_gur __iomem *gur =
  244. (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
  245. int i = 0, count = 0;
  246. u32 cluster, type;
  247. do {
  248. int j;
  249. cluster = gur_in32(&gur->tp_cluster[i].lower);
  250. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  251. type = initiator_type(cluster, j);
  252. if (type) {
  253. if (count == core)
  254. return type;
  255. count++;
  256. }
  257. }
  258. i++;
  259. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  260. return -1; /* cannot identify the cluster */
  261. }
#ifndef CONFIG_FSL_LSCH3
/*
 * Read the System Version Register (SVR) from GUTS.
 * NOTE(review): not built for LSCH3 — presumably that chassis provides
 * get_svr() elsewhere; confirm against the LSCH3 SoC code.
 */
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif
  269. #ifdef CONFIG_DISPLAY_CPUINFO
  270. int print_cpuinfo(void)
  271. {
  272. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  273. struct sys_info sysinfo;
  274. char buf[32];
  275. unsigned int i, core;
  276. u32 type, rcw, svr = gur_in32(&gur->svr);
  277. puts("SoC: ");
  278. cpu_name(buf);
  279. printf(" %s (0x%x)\n", buf, svr);
  280. memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
  281. get_sys_info(&sysinfo);
  282. puts("Clock Configuration:");
  283. for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
  284. if (!(i % 3))
  285. puts("\n ");
  286. type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
  287. printf("CPU%d(%s):%-4s MHz ", core,
  288. type == TY_ITYP_VER_A7 ? "A7 " :
  289. (type == TY_ITYP_VER_A53 ? "A53" :
  290. (type == TY_ITYP_VER_A57 ? "A57" :
  291. (type == TY_ITYP_VER_A72 ? "A72" : " "))),
  292. strmhz(buf, sysinfo.freq_processor[core]));
  293. }
  294. printf("\n Bus: %-4s MHz ",
  295. strmhz(buf, sysinfo.freq_systembus));
  296. printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
  297. #ifdef CONFIG_SYS_DPAA_FMAN
  298. printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
  299. #endif
  300. #ifdef CONFIG_SYS_FSL_HAS_DP_DDR
  301. if (soc_has_dp_ddr()) {
  302. printf(" DP-DDR: %-4s MT/s",
  303. strmhz(buf, sysinfo.freq_ddrbus2));
  304. }
  305. #endif
  306. puts("\n");
  307. /*
  308. * Display the RCW, so that no one gets confused as to what RCW
  309. * we're actually using for this boot.
  310. */
  311. puts("Reset Configuration Word (RCW):");
  312. for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
  313. rcw = gur_in32(&gur->rcwsr[i]);
  314. if ((i % 4) == 0)
  315. printf("\n %08x:", i * 4);
  316. printf(" %08x", rcw);
  317. }
  318. puts("\n");
  319. return 0;
  320. }
  321. #endif
#ifdef CONFIG_FSL_ESDHC
/* Board MMC init hook: register the Freescale eSDHC controller */
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif
/*
 * Initialize the SoC Ethernet blocks enabled in the configuration:
 * the DPAA2 Management Complex and/or FMan.  Returns the MC init
 * result (0 when MC support is not built in); note that the FMan
 * init status is not propagated.
 */
int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
/*
 * Post-relocation arch init: apply erratum workarounds, wake the
 * secondary cores (only when no PSCI firmware will manage them), and
 * bring up SerDes and FMan where configured.  Always returns 0.
 */
int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
	/* 0xffffffff doubles as "no PSCI support detected" */
	u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
	/* Check the psci version to determine if the psci is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	/* Spin the cores up directly only when PSCI cannot do it for us */
	if (psci_ver == 0xffffffff) {
		/* "seconday" typo is in the external symbol name; keep it */
		rv = fsl_layerscape_wake_seconday_cores();

		if (rv)
			printf("Did not wake secondary cores\n");
	}
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
/*
 * Program the generic timer: set the counter frequency (when the real
 * one is known), enable per-cluster/per-core timebases where the SoC
 * requires it, and finally start the global counter.  Returns 0.
 */
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core's
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);

	/*
	 * For LS2080A SoC and its personalities, timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
  409. __efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
  410. void __efi_runtime reset_cpu(ulong addr)
  411. {
  412. u32 val;
  413. /* Raise RESET_REQ_B */
  414. val = scfg_in32(rstcr);
  415. val |= 0x02;
  416. scfg_out32(rstcr, val);
  417. }
  418. #ifdef CONFIG_EFI_LOADER
  419. void __efi_runtime EFIAPI efi_reset_system(
  420. enum efi_reset_type reset_type,
  421. efi_status_t reset_status,
  422. unsigned long data_size, void *reset_data)
  423. {
  424. switch (reset_type) {
  425. case EFI_RESET_COLD:
  426. case EFI_RESET_WARM:
  427. reset_cpu(0);
  428. break;
  429. case EFI_RESET_SHUTDOWN:
  430. /* Nothing we can do */
  431. break;
  432. }
  433. while (1) { }
  434. }
/* Map the reset register into the EFI runtime address space so that
 * efi_reset_system() keeps working after SetVirtualAddressMap. */
void efi_reset_system_init(void)
{
	efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}
#endif
  440. phys_size_t board_reserve_ram_top(phys_size_t ram_size)
  441. {
  442. phys_size_t ram_top = ram_size;
  443. #ifdef CONFIG_SYS_MEM_TOP_HIDE
  444. #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
  445. #endif
  446. /* Carve the MC private DRAM block from the end of DRAM */
  447. #ifdef CONFIG_FSL_MC_ENET
  448. ram_top -= mc_get_dram_block_size();
  449. ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
  450. #endif
  451. return ram_top;
  452. }