/* cpu.c */
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/io.h>	/* note: included twice; harmless due to include guards */
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;
  29. void cpu_name(char *name)
  30. {
  31. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  32. unsigned int i, svr, ver;
  33. svr = gur_in32(&gur->svr);
  34. ver = SVR_SOC_VER(svr);
  35. for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
  36. if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
  37. strcpy(name, cpu_type_list[i].name);
  38. if (IS_E_PROCESSOR(svr))
  39. strcat(name, "E");
  40. break;
  41. }
  42. if (i == ARRAY_SIZE(cpu_type_list))
  43. strcpy(name, "unknown");
  44. }
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24
 * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purpose.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/*
	 * Point TTBR to the new table. The ORGN/IRGN cacheability bits
	 * are cleared here since the D-cache is not yet enabled.
	 */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);	/* turn the MMU on */
}
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will be not used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	/* remember where the main table lives so it can be restored below */
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	int index;
#endif

	mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/*
	 * Create emergency page tables right after the normal set, starting
	 * at the advanced fill pointer; then restore tlb_addr to the main
	 * table for the TTBR programming below.
	 */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
	/*
	 * EL3 MMU is already enabled, just need to invalidate TLB to load the
	 * new table. The new table is compatible with the current table, if
	 * MMU somehow walks through the new table before invalidation TLB,
	 * it still works. So we don't need to turn off MMU here.
	 * When EL2 MMU table is created by calling this function, MMU needs
	 * to be enabled.
	 */
	set_sctlr(get_sctlr() | CR_M);
}
/*
 * Memory reserved for MMU tables: a fixed 64 KiB, which holds both the
 * normal and the emergency table sets created in final_mmu_setup().
 */
u64 get_page_table_size(void)
{
	return 0x10000;
}
/*
 * Early CPU init: enable the I-cache, invalidate stale cache/TLB state,
 * build the early (OCRAM) MMU tables and then enable the D-cache.
 * The ordering of these steps is significant.
 */
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);	/* enable D-cache */
	return 0;
}
/* Replace the early MMU tables with the final, DRAM-based set. */
void mmu_setup(void)
{
	final_mmu_setup();
}
/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();	/* drop translations from the early tables */
	icache_enable();
	dcache_enable();
}
#endif
  164. static inline u32 initiator_type(u32 cluster, int init_id)
  165. {
  166. struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  167. u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
  168. u32 type = 0;
  169. type = gur_in32(&gur->tp_ityp[idx]);
  170. if (type & TP_ITYP_AV)
  171. return type;
  172. return 0;
  173. }
  174. u32 cpu_mask(void)
  175. {
  176. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  177. int i = 0, count = 0;
  178. u32 cluster, type, mask = 0;
  179. do {
  180. int j;
  181. cluster = gur_in32(&gur->tp_cluster[i].lower);
  182. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  183. type = initiator_type(cluster, j);
  184. if (type) {
  185. if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
  186. mask |= 1 << count;
  187. count++;
  188. }
  189. }
  190. i++;
  191. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  192. return mask;
  193. }
  194. /*
  195. * Return the number of cores on this SOC.
  196. */
  197. int cpu_numcores(void)
  198. {
  199. return hweight32(cpu_mask());
  200. }
  201. int fsl_qoriq_core_to_cluster(unsigned int core)
  202. {
  203. struct ccsr_gur __iomem *gur =
  204. (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
  205. int i = 0, count = 0;
  206. u32 cluster;
  207. do {
  208. int j;
  209. cluster = gur_in32(&gur->tp_cluster[i].lower);
  210. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  211. if (initiator_type(cluster, j)) {
  212. if (count == core)
  213. return i;
  214. count++;
  215. }
  216. }
  217. i++;
  218. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  219. return -1; /* cannot identify the cluster */
  220. }
  221. u32 fsl_qoriq_core_to_type(unsigned int core)
  222. {
  223. struct ccsr_gur __iomem *gur =
  224. (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
  225. int i = 0, count = 0;
  226. u32 cluster, type;
  227. do {
  228. int j;
  229. cluster = gur_in32(&gur->tp_cluster[i].lower);
  230. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  231. type = initiator_type(cluster, j);
  232. if (type) {
  233. if (count == core)
  234. return type;
  235. count++;
  236. }
  237. }
  238. i++;
  239. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  240. return -1; /* cannot identify the cluster */
  241. }
/* Read the System Version Register (SVR) from the GUR block. */
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#ifdef CONFIG_DISPLAY_CPUINFO
/*
 * Print SoC name and SVR, per-core type and clock frequencies, bus and
 * DDR (plus optional FMan / DP-DDR) frequencies, and the Reset
 * Configuration Word. Built only with CONFIG_DISPLAY_CPUINFO.
 */
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");
	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	/* buf is reused by strmhz() below; clear it first */
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		/* start a new output line every three cores */
		if (!(i % 3))
			puts("\n ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		/* four words per line, prefixed with the byte offset */
		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
/* Register the on-SoC eSDHC controller(s) with the MMC framework. */
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif
/*
 * Initialize the SoC Ethernet blocks: the DPAA2 management complex
 * and/or FMan, depending on configuration. Returns the result of
 * fsl_mc_ldpaa_init() (0 otherwise); the FMan init result is
 * intentionally not propagated.
 */
int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
/*
 * Post-relocation arch init: apply errata, wake secondary cores when no
 * PSCI firmware is present to do so, then bring up SerDes and FMan.
 */
int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
	u32 psci_ver = 0xffffffff;	/* sentinel: PSCI unavailable */
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
	/* Check the psci version to determine if the psci is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	if (psci_ver == 0xffffffff) {
		/*
		 * No PSCI: release the secondary cores ourselves.
		 * ("seconday" is the actual spelling of the external
		 * helper's name — do not "fix" it here.)
		 */
		rv = fsl_layerscape_wake_seconday_cores();
		if (rv)
			printf("Did not wake secondary cores\n");
	}
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
/*
 * Initialize the ARM generic timer: program the counter frequency when
 * the accurate value is known, enable the per-cluster / per-core
 * timebase where the SoC requires it, then start the global counter.
 */
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core's
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
#endif

	/* Enable clock for timer
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
  379. void reset_cpu(ulong addr)
  380. {
  381. u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
  382. u32 val;
  383. /* Raise RESET_REQ_B */
  384. val = scfg_in32(rstcr);
  385. val |= 0x02;
  386. scfg_out32(rstcr, val);
  387. }
/*
 * Reserve memory at the top of DRAM (the MC private block when DPAA2 is
 * enabled) and return the new effective RAM top.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

/* This hook and CONFIG_SYS_MEM_TOP_HIDE would both carve the top of RAM */
#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
#endif

	/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_top;
}