cpu.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472
  1. /*
  2. * Copyright 2014-2015 Freescale Semiconductor, Inc.
  3. *
  4. * SPDX-License-Identifier: GPL-2.0+
  5. */
  6. #include <common.h>
  7. #include <asm/io.h>
  8. #include <linux/errno.h>
  9. #include <asm/system.h>
  10. #include <asm/armv8/mmu.h>
  11. #include <asm/io.h>
  12. #include <asm/arch/fsl_serdes.h>
  13. #include <asm/arch/soc.h>
  14. #include <asm/arch/cpu.h>
  15. #include <asm/arch/speed.h>
  16. #ifdef CONFIG_MP
  17. #include <asm/arch/mp.h>
  18. #endif
  19. #include <fm_eth.h>
  20. #include <fsl_debug_server.h>
  21. #include <fsl-mc/fsl_mc.h>
  22. #ifdef CONFIG_FSL_ESDHC
  23. #include <fsl_esdhc.h>
  24. #endif
  25. #ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
  26. #include <asm/armv8/sec_firmware.h>
  27. #endif
  28. DECLARE_GLOBAL_DATA_PTR;
/*
 * Region list consumed by the generic ARMv8 MMU code. It starts as the
 * early (pre-DDR, OCRAM-backed) map; final_mmu_setup() switches it to
 * final_map once DRAM tables are built.
 */
struct mm_region *mem_map = early_map;
  30. void cpu_name(char *name)
  31. {
  32. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  33. unsigned int i, svr, ver;
  34. svr = gur_in32(&gur->svr);
  35. ver = SVR_SOC_VER(svr);
  36. for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
  37. if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
  38. strcpy(name, cpu_type_list[i].name);
  39. if (IS_E_PROCESSOR(svr))
  40. strcat(name, "E");
  41. break;
  42. }
  43. if (i == ARRAY_SIZE(cpu_type_list))
  44. strcpy(name, "unknown");
  45. }
  46. #ifndef CONFIG_SYS_DCACHE_OFF
  47. /*
  48. * To start MMU before DDR is available, we create MMU table in SRAM.
  49. * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
  50. * levels of translation tables here to cover 40-bit address space.
  51. * We use 4KB granule size, with 40 bits physical address, T0SZ=24
  52. * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purpose.
  53. * Note, the debug print in cache_v8.c is not usable for debugging
  54. * these early MMU tables because UART is not yet available.
  55. */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/*
	 * point TTBR to the new table; ORGN/IRGN cacheability bits are
	 * masked out of TCR because the D-cache is not enabled yet.
	 */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	/* Turn the MMU on at the current exception level */
	set_sctlr(get_sctlr() | CR_M);
}
  72. /*
  73. * The final tables look similar to early tables, but different in detail.
  74. * These tables are in DRAM. Sub tables are added to enable cache for
  75. * QBMan and OCRAM.
  76. *
  77. * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
  78. * OCRAM will be not used for this purpose so gd->arch.secure_ram can't be 0.
  79. */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	int index;
#endif

	/* Switch the region list from the early map to the full one */
	mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/*
	 * Create emergency page tables; tlb_fillptr now points just past
	 * the normal tables, so the emergency set lands right after them.
	 */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
	/*
	 * EL3 MMU is already enabled, just need to invalidate TLB to load the
	 * new table. The new table is compatible with the current table, if
	 * MMU somehow walks through the new table before invalidation TLB,
	 * it still works. So we don't need to turn off MMU here.
	 * When EL2 MMU table is created by calling this function, MMU needs
	 * to be enabled.
	 */
	set_sctlr(get_sctlr() | CR_M);
}
  136. u64 get_page_table_size(void)
  137. {
  138. return 0x10000;
  139. }
/*
 * Earliest per-CPU init: enable the I-cache, invalidate any stale
 * D-cache/TLB contents, build the early MMU tables in OCRAM, then
 * enable the D-cache (CR_C). Always returns 0.
 */
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}
/* Arch hook: replace the early OCRAM tables with the final DRAM tables. */
void mmu_setup(void)
{
	final_mmu_setup();
}
  153. /*
  154. * This function is called from common/board_r.c.
  155. * It recreates MMU table in main memory.
  156. */
void enable_caches(void)
{
	/* Rebuild the MMU tables in main memory (final_mmu_setup) */
	mmu_setup();
	/* Drop stale translations before re-enabling the caches */
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
  164. #endif
  165. static inline u32 initiator_type(u32 cluster, int init_id)
  166. {
  167. struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  168. u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
  169. u32 type = 0;
  170. type = gur_in32(&gur->tp_ityp[idx]);
  171. if (type & TP_ITYP_AV)
  172. return type;
  173. return 0;
  174. }
  175. u32 cpu_mask(void)
  176. {
  177. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  178. int i = 0, count = 0;
  179. u32 cluster, type, mask = 0;
  180. do {
  181. int j;
  182. cluster = gur_in32(&gur->tp_cluster[i].lower);
  183. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  184. type = initiator_type(cluster, j);
  185. if (type) {
  186. if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
  187. mask |= 1 << count;
  188. count++;
  189. }
  190. }
  191. i++;
  192. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  193. return mask;
  194. }
  195. /*
  196. * Return the number of cores on this SOC.
  197. */
  198. int cpu_numcores(void)
  199. {
  200. return hweight32(cpu_mask());
  201. }
  202. int fsl_qoriq_core_to_cluster(unsigned int core)
  203. {
  204. struct ccsr_gur __iomem *gur =
  205. (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
  206. int i = 0, count = 0;
  207. u32 cluster;
  208. do {
  209. int j;
  210. cluster = gur_in32(&gur->tp_cluster[i].lower);
  211. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  212. if (initiator_type(cluster, j)) {
  213. if (count == core)
  214. return i;
  215. count++;
  216. }
  217. }
  218. i++;
  219. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  220. return -1; /* cannot identify the cluster */
  221. }
  222. u32 fsl_qoriq_core_to_type(unsigned int core)
  223. {
  224. struct ccsr_gur __iomem *gur =
  225. (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
  226. int i = 0, count = 0;
  227. u32 cluster, type;
  228. do {
  229. int j;
  230. cluster = gur_in32(&gur->tp_cluster[i].lower);
  231. for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
  232. type = initiator_type(cluster, j);
  233. if (type) {
  234. if (count == core)
  235. return type;
  236. count++;
  237. }
  238. }
  239. i++;
  240. } while ((cluster & TP_CLUSTER_EOC) == 0x0);
  241. return -1; /* cannot identify the cluster */
  242. }
  243. uint get_svr(void)
  244. {
  245. struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
  246. return gur_in32(&gur->svr);
  247. }
  248. #ifdef CONFIG_DISPLAY_CPUINFO
/*
 * Print SoC identification, per-core type and clock, bus/DDR (and
 * optional FMAN / DP-DDR) frequencies, and the Reset Configuration
 * Word. Always returns 0.
 */
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");
	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	/* buf is reused below as the strmhz() scratch buffer */
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		/* start a new output line every three cores */
		if (!(i % 3))
			puts("\n ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n Bus: %-4s MHz ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf(" DP-DDR: %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		/* four words per line, prefixed with the byte offset */
		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
  300. #endif
  301. #ifdef CONFIG_FSL_ESDHC
/* Register the on-SoC eSDHC controller with the MMC framework. */
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
  306. #endif
  307. int cpu_eth_init(bd_t *bis)
  308. {
  309. int error = 0;
  310. #ifdef CONFIG_FSL_MC_ENET
  311. error = fsl_mc_ldpaa_init(bis);
  312. #endif
  313. #ifdef CONFIG_FMAN_ENET
  314. fm_standard_init(bis);
  315. #endif
  316. return error;
  317. }
/*
 * Post-relocation arch init: apply erratum A009635 if configured, wake
 * the secondary cores via the spin table unless secure firmware
 * provides PSCI for CPU bring-up, then initialize SerDes and FMAN
 * Ethernet when configured. Always returns 0.
 */
int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
	u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
	/* Check the psci version to determine if the psci is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	/* 0xffffffff means no usable PSCI: wake cores ourselves */
	if (psci_ver == 0xffffffff) {
		rv = fsl_layerscape_wake_seconday_cores();

		if (rv)
			printf("Did not wake secondary cores\n");
	}
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
/*
 * Program the generic timer: optionally publish the real counter
 * frequency in CNTFRQ_EL0, enable cluster/core timebases where the SoC
 * requires it, and finally enable the global counter. Returns 0.
 */
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core's
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
#endif

	/* Enable clock for timer
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
  380. void reset_cpu(ulong addr)
  381. {
  382. u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
  383. u32 val;
  384. /* Raise RESET_REQ_B */
  385. val = scfg_in32(rstcr);
  386. val |= 0x02;
  387. scfg_out32(rstcr, val);
  388. }
  389. phys_size_t board_reserve_ram_top(phys_size_t ram_size)
  390. {
  391. phys_size_t ram_top = ram_size;
  392. #ifdef CONFIG_SYS_MEM_TOP_HIDE
  393. #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
  394. #endif
  395. /* Carve the Debug Server private DRAM block from the end of DRAM */
  396. #ifdef CONFIG_FSL_DEBUG_SERVER
  397. ram_top -= debug_server_get_dram_block_size();
  398. #endif
  399. /* Carve the MC private DRAM block from the end of DRAM */
  400. #ifdef CONFIG_FSL_MC_ENET
  401. ram_top -= mc_get_dram_block_size();
  402. ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
  403. #endif
  404. return ram_top;
  405. }