lowlevel.S

/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>

/* Get GIC offset
 * For LS1043A rev1.0, the GIC base address is aligned to 4KB.
 * For LS1043A rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT] is set,
 * the GIC base address is aligned to 4KB, otherwise it is aligned
 * to 64KB.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC
 */
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2			/* byte-swap the big-endian DCFG value */
	lsr	w3, w2, #16
	ldr	w4, =SVR_DEV(SVR_LS1043A)
	cmp	w3, w4
	b.ne	1f
	ands	w2, w2, #0xff
	cmp	w2, #REV1_0
	b.eq	1f
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2
	tbnz	w2, #GIC_ADDR_BIT, 1f
	ldr	x0, =GICD_BASE_64K
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)
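
/*
 * Typical call sequence (used by smp_kick_all_cpus and lowlevel_init below):
 * save lr in a scratch register, "bl get_gic_offset", then pass x0 (GICD
 * base) and, for GICv2, x1 (GICC base) straight to the generic GIC helpers.
 */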

ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)

ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:

#if defined (CONFIG_SYS_FSL_HAS_CCN504)
	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RNI-6, RNI-20.
	 * This is required for performance optimization on LS2088A.
	 * The LS2080A family does not support setting forced-order mode,
	 * so skip this operation for the LS2080A family.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

100:
	branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE	/* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3		/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC-related operations.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	/*
	 * Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */
#ifdef CONFIG_FSL_TZASC_1
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
#ifdef CONFIG_FSL_TZASC_2
	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-0 Attributes Register (TZASC 2) */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-0 Access Register (TZASC 2) */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
	isb
	dsb	sy
#endif
100:
1:

#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
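	/*
	 * Note: S3_1_c11_c0_2 is the implementation-defined encoding of
	 * L2CTLR_EL1 on the Cortex-A72 cores used here. The 0x1C7 mask below
	 * clears bits [8:6] (L2 tag RAM latency) and [2:0] (L2 data RAM
	 * latency); 0x2 and 0x80 then program both latency fields to 0b010,
	 * i.e. 3 cycles, assuming the usual "value + 1 cycles" encoding.
	 */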
	/* Initialize the L2 RAM latency */
	mrs	x1, S3_1_c11_c0_2
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 data ram latency bits [2:0] */
	orr	x1, x1, #0x2
	/* Set L2 tag ram latency bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
100:
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)
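
/*
 * fsl_clear_ocram zero-fills the whole on-chip RAM, 64 bits at a time.
 * Besides giving a known initial content, writing every location also
 * establishes valid ECC check bits for the OCRAM (the usual reason for a
 * full clear on these SoCs); any ECC error status latched before or during
 * the clear is then reset by fsl_ocram_clear_ecc_err below.
 */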
ENTRY(fsl_clear_ocram)
	/* Clear OCRAM */
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop
	ret
ENDPROC(fsl_clear_ocram)
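
/*
 * Clear the OCRAM1/OCRAM2 ECC error status. Writing 0x60 sets bits 5 and 6,
 * which correspond to the two OCRAM banks in the single-bit (SBEESR2) and
 * multi-bit (MBEESR2) ECC error status registers; the status bits appear to
 * be write-1-to-clear.
 */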
ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
ENDPROC(fsl_ocram_clear_ecc_err)
#endif

#ifdef CONFIG_FSL_LSCH3
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret
#endif

#ifdef CONFIG_SYS_FSL_HAS_CCN504
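/*
 * hnf_pstate_poll and hnf_set_pstate below walk the eight HN-F (home node,
 * L3 cache slice) nodes of the CCN-504. The immediates build the register
 * addresses: 0x04200018 is HNF0_PSTATE_STATUS and 0x04200010 is
 * HNF0_PSTATE_REQ, with each successive node 0x10000 further on, i.e.
 * node n sits at 0x04200000 + n * 0x10000 (names and offsets read straight
 * off the mov/movk sequences and comments in the code).
 */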
hnf_pstate_poll:
	/*
	 * x0 has the desired status; return 0 for success, 1 for timeout.
	 * Clobbers x1, x2, x3, x4, x6, x7.
	 */
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state; clobbers x1, x2, x6 */
	mov	x1, x0
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0:
	 *	success		0
	 *	timeout		1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2
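	/*
	 * Each __spin_table element is SPIN_TABLE_ELEM_SIZE (padded to 64
	 * bytes) and, as used by the code below, holds:
	 *	+0:  entry address written by the master (0 while waiting)
	 *	+8:  STATUS
	 *	+16: LPID (MPIDR[15:0] of the core owning the element)
	 *	+24: flag selecting the AArch32 path when non-zero
	 * (offsets taken from the loads/stores in secondary_boot_func and
	 * secondary_switch_to_el1 below)
	 */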
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 *	MPIDR[1:0]   = AFF0_CPUID <- Core ID (0, 1)
	 *	MPIDR[7:2]   = AFF0_RES
	 *	MPIDR[15:8]  = AFF1_CLUSTERID <- Cluster ID (0, 1, 2, 3)
	 *	MPIDR[23:16] = AFF2_CLUSTERID
	 *	MPIDR[24]    = MT
	 *	MPIDR[29:25] = RES0
	 *	MPIDR[30]    = U
	 *	MPIDR[31]    = ME
	 *	MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
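	/*
	 * Worked example of the calculation below: for cluster 1, core 1
	 * (MPIDR[15:8] = 1, MPIDR[1:0] = 1) the orr with "lsl #2" gives
	 * LPID = (1 << 2) | 1 = 5, and the spin-table offset is
	 * 5 * 64 = 320 bytes (0x140).
	 */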
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS */
	dsb	sy

#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1
	gic_wait_for_interrupt_m x0, w1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le	/* SCTLR_ELx.EE: skip if little-endian */
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]
	cbz	x5, 1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2
ENDPROC(secondary_boot_func)
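
/*
 * Secondary release protocol, as implemented above: each secondary parks in
 * the wfe loop with its spin-table entry address (element offset 0) set to
 * zero. The master writes the 64-bit start address into that slot (and is
 * expected to wake the core, e.g. with sev); the secondary then byte-swaps
 * the address if it is running big-endian (SCTLR.EE set) and jumps to it
 * through secondary_switch_to_el2 (optionally via secondary_switch_to_el1)
 * in the requested exception level and execution state.
 */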

ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]

	ldr	x5, [x11, #24]
	cbz	x5, 2f

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x5, =ES_TO_AARCH64

switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)

	/*
	 * Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY

	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif