lowlevel.S

/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif

ENTRY(lowlevel_init)
        mov x29, lr /* Save LR */

#ifdef CONFIG_FSL_LSCH3
        /* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_LS2080A
        ldr x0, =CCI_AUX_CONTROL_BASE(20)
        ldr x1, =0x00000010
        bl ccn504_set_aux
#endif

        /* Add fully-coherent masters to DVM domain */
        ldr x0, =CCI_MN_BASE
        ldr x1, =CCI_MN_RNF_NODEID_LIST
        ldr x2, =CCI_MN_DVM_DOMAIN_CTL_SET
        bl ccn504_add_masters_to_dvm

        /* Set all RN-I ports to QoS of 15 */
        ldr x0, =CCI_S0_QOS_CONTROL_BASE(0)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S1_QOS_CONTROL_BASE(0)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S2_QOS_CONTROL_BASE(0)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos

        ldr x0, =CCI_S0_QOS_CONTROL_BASE(2)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S1_QOS_CONTROL_BASE(2)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S2_QOS_CONTROL_BASE(2)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos

        ldr x0, =CCI_S0_QOS_CONTROL_BASE(6)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S1_QOS_CONTROL_BASE(6)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S2_QOS_CONTROL_BASE(6)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos

        ldr x0, =CCI_S0_QOS_CONTROL_BASE(12)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S1_QOS_CONTROL_BASE(12)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S2_QOS_CONTROL_BASE(12)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos

        ldr x0, =CCI_S0_QOS_CONTROL_BASE(16)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S1_QOS_CONTROL_BASE(16)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S2_QOS_CONTROL_BASE(16)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos

        ldr x0, =CCI_S0_QOS_CONTROL_BASE(20)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S1_QOS_CONTROL_BASE(20)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
        ldr x0, =CCI_S2_QOS_CONTROL_BASE(20)
        ldr x1, =0x00FF000C
        bl ccn504_set_qos
#endif

#ifdef SMMU_BASE
        /* Set the SMMU page size in the sACR register */
        ldr x1, =SMMU_BASE
        ldr w0, [x1, #0x10]
        orr w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */
        str w0, [x1, #0x10]
#endif

        /* Initialize GIC Secure Bank Status */
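        /*
         * The master core initializes the shared GIC distributor; slave
         * cores skip to 1: and, like the master, initialize only their own
         * banked CPU interface (GICv2) or redistributor (GICv3).
         */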
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
        branch_if_slave x0, 1f
        ldr x0, =GICD_BASE
        bl gic_init_secure
1:
#ifdef CONFIG_GICV3
        ldr x0, =GICR_BASE
        bl gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
        ldr x0, =GICD_BASE
        ldr x1, =GICC_BASE
        bl gic_init_secure_percpu
#endif
#endif

        branch_if_master x0, x1, 2f
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
        ldr x0, =secondary_boot_func
        blr x0
#endif
2:

#ifdef CONFIG_FSL_TZPC_BP147
        /* Set Non-Secure access for all devices protected via TZPC */
        ldr x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
        orr w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
        str w0, [x1]

        isb
        dsb sy
#endif

#ifdef CONFIG_FSL_TZASC_400
        /*
         * Set TZASC so that:
         * a. We use only Region0 whose global secure write/read is EN
         * b. We use only Region0 whose NSAID write/read is EN
         *
         * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
         * placeholders.
         */
        ldr x1, =TZASC_GATE_KEEPER(0)
        ldr x0, [x1] /* Filter 0 Gate Keeper Register */
        orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */
        str x0, [x1]

        ldr x1, =TZASC_GATE_KEEPER(1)
        ldr x0, [x1] /* Filter 1 Gate Keeper Register */
        orr x0, x0, #1 << 0 /* Set open_request for Filter 1 */
        str x0, [x1]

        ldr x1, =TZASC_REGION_ATTRIBUTES_0(0)
        ldr x0, [x1] /* Region-0 Attributes Register */
        orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
        orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
        str x0, [x1]

        ldr x1, =TZASC_REGION_ATTRIBUTES_0(1)
        ldr x0, [x1] /* Region-1 Attributes Register */
        orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */
        orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */
        str x0, [x1]

        ldr x1, =TZASC_REGION_ID_ACCESS_0(0)
        ldr w0, [x1] /* Region-0 Access Register */
        mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
        str w0, [x1]

        ldr x1, =TZASC_REGION_ID_ACCESS_0(1)
        ldr w0, [x1] /* Region-1 Access Register */
        mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */
        str w0, [x1]

        isb
        dsb sy
#endif

#ifdef CONFIG_ARCH_LS1046A
        /* Initialize the L2 RAM latency */
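        /*
         * Note: S3_1_c11_c0_2 is assumed to be the implementation-defined
         * L2CTLR_EL1 register of the Cortex-A72 cores used on LS1046A.
         */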
        mrs x1, S3_1_c11_c0_2
        mov x0, #0x1C7
        /* Clear L2 Tag RAM latency and L2 Data RAM latency */
        bic x1, x1, x0
        /* Set L2 Data RAM latency, bits [2:0] */
        orr x1, x1, #0x2
        /* Set L2 Tag RAM latency, bits [8:6] */
        orr x1, x1, #0x80
        msr S3_1_c11_c0_2, x1
        isb
#endif

        mov lr, x29 /* Restore LR */
        ret
ENDPROC(lowlevel_init)

#ifdef CONFIG_FSL_LSCH3
hnf_pstate_poll:
        /*
         * x0 has the desired status; return 0 for success, 1 for timeout.
         * Clobbers x1, x2, x3, x4, x6, x7.
         */
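        /*
         * The loop below polls HNF0_PSTATE_STATUS at 0x04200018 (composed by
         * the mov/movk pair) and walks the eight HN-F nodes at 0x10000
         * intervals. Note that 1200 counter ticks correspond to the stated
         * 100 microseconds only for a system counter of about 12 MHz.
         */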
        mov x1, x0
        mov x7, #0 /* flag for timeout */
        mrs x3, cntpct_el0 /* read timer */
        add x3, x3, #1200 /* timeout after 100 microseconds */
        mov x0, #0x18
        movk x0, #0x420, lsl #16 /* HNF0_PSTATE_STATUS */
        mov w6, #8 /* HN-F node count */
1:
        ldr x2, [x0]
        cmp x2, x1 /* check status */
        b.eq 2f
        mrs x4, cntpct_el0
        cmp x4, x3
        b.ls 1b
        mov x7, #1 /* timeout */
        b 3f
2:
        add x0, x0, #0x10000 /* move to next node */
        subs w6, w6, #1
        cbnz w6, 1b
3:
        mov x0, x7
        ret
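
/*
 * hnf_set_pstate writes the requested power state (passed in x0) into
 * HNF<n>_PSTATE_REQ (0x04200010 + n * 0x10000) for all eight HN-F nodes;
 * only the low two bits of each request register select the state.
 */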
hnf_set_pstate:
        /* x0 has the desired state; clobbers x1, x2, x6 */
        mov x1, x0
        /* power state to SFONLY */
        mov w6, #8 /* HN-F node count */
        mov x0, #0x10
        movk x0, #0x420, lsl #16 /* HNF0_PSTATE_REQ */
1:      /* set pstate to sfonly */
        ldr x2, [x0]
        and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
        orr x2, x2, x1
        str x2, [x0]
        add x0, x0, #0x10000 /* move to next node */
        subs w6, w6, #1
        cbnz w6, 1b
        ret
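
/*
 * __asm_flush_l3_cache drives every HN-F node to the SFONLY power state
 * and then to the FAM state, polling for completion after each request;
 * this flushes the L3/system cache held in the CCN-504 HN-F nodes. The
 * return value encodes which of the two steps, if any, timed out.
 */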
ENTRY(__asm_flush_l3_cache)
        /*
         * Return status in x0:
         *   success: 0
         *   timeout: 1 for setting SFONLY, 2 for FAM, 3 for both
         */
        mov x29, lr
        mov x8, #0

        dsb sy
        mov x0, #0x1 /* HNFPSTAT_SFONLY */
        bl hnf_set_pstate

        mov x0, #0x4 /* SFONLY status */
        bl hnf_pstate_poll
        cbz x0, 1f
        mov x8, #1 /* timeout */
1:
        dsb sy
        mov x0, #0x3 /* HNFPSTAT_FAM */
        bl hnf_set_pstate

        mov x0, #0xc /* FAM status */
        bl hnf_pstate_poll
        cbz x0, 1f
        add x8, x8, #0x2
1:
        mov x0, x8
        mov lr, x29
        ret
ENDPROC(__asm_flush_l3_cache)
#endif

#ifdef CONFIG_MP
        /* Keep literals not used by the secondary boot code outside it */
        .ltorg

        /* Using 64 bit alignment since the spin table is accessed as data */
        .align 4
        .global secondary_boot_code
        /* Secondary Boot Code starts here */
secondary_boot_code:
        .global __spin_table
__spin_table:
        .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
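
/*
 * Spin table element layout as used by secondary_boot_func below; each
 * element is SPIN_TABLE_ELEM_SIZE bytes (64, matching the lsl #6 below):
 *   0x00  jump address, polled by the parked core in slave_cpu
 *   0x08  status flag, set to 1 once the core has parked itself
 *   0x10  LPID (MPIDR[15:0]) of the owning core
 */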
        .align 2
ENTRY(secondary_boot_func)
        /*
         * MPIDR_EL1 Fields:
         * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
         * MPIDR[7:2] = AFF0_RES
         * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
         * MPIDR[23:16] = AFF2_CLUSTERID
         * MPIDR[24] = MT
         * MPIDR[29:25] = RES0
         * MPIDR[30] = U
         * MPIDR[31] = ME
         * MPIDR[39:32] = AFF3
         *
         * Linear Processor ID (LPID) calculation from MPIDR_EL1
         * (we only use AFF0_CPUID and AFF1_CLUSTERID for now,
         * until AFF2_CLUSTERID and AFF3 have non-zero values):
         *
         * LPID = (MPIDR[15:8] << 2) | MPIDR[1:0]
         */
        mrs x0, mpidr_el1
        ubfm x1, x0, #8, #15
        ubfm x2, x0, #0, #1
        orr x10, x2, x1, lsl #2 /* x10 has LPID */
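        /*
         * Worked example: cluster 1, core 1 -> MPIDR[15:8] = 1 and
         * MPIDR[1:0] = 1, so LPID = (1 << 2) | 1 = 5.
         */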
        ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
        /*
         * Offset of this core's spin table element from the start of the
         * spin table (each element is padded to 64 bytes)
         */
        lsl x1, x10, #6
        ldr x0, =__spin_table
        /* physical address of this CPU's spin table element */
        add x11, x1, x0

        ldr x0, =__real_cntfrq
        ldr x0, [x0]
        msr cntfrq_el0, x0 /* set with real frequency */
        str x9, [x11, #16] /* LPID */
        mov x4, #1
        str x4, [x11, #8] /* STATUS */
        dsb sy
#if defined(CONFIG_GICV3)
        gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
        ldr x0, =GICC_BASE
        gic_wait_for_interrupt_m x0, w1
#endif

        bl secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
        bl secondary_switch_to_el1
#endif

slave_cpu:
        wfe
        ldr x0, [x11]
        cbz x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
        mrs x1, sctlr_el2
#else
        mrs x1, sctlr_el1
#endif
        tbz x1, #25, cpu_is_le
        rev x0, x0 /* BE to LE conversion */
cpu_is_le:
        br x0 /* branch to the given address */
ENDPROC(secondary_boot_func)
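
/*
 * switch_el branches to its first, second or third label depending on
 * whether the core is currently running at EL3, EL2 or EL1 (see
 * asm/macro.h), so each helper below performs the switch only from the
 * higher exception level and simply returns otherwise.
 */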
ENTRY(secondary_switch_to_el2)
        switch_el x0, 1f, 0f, 0f
0:      ret
1:      armv8_switch_to_el2_m x0
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
        switch_el x0, 0f, 1f, 0f
0:      ret
1:      armv8_switch_to_el1_m x0, x1
ENDPROC(secondary_switch_to_el1)

        /*
         * Ensure that the literals used by the secondary boot code are
         * assembled within it (this is required so that we can protect
         * this area with a single memreserve region).
         */
        .ltorg

        /* 64 bit alignment for elements accessed as data */
        .align 4
        .global __real_cntfrq
__real_cntfrq:
        .quad COUNTER_FREQUENCY

        .globl __secondary_boot_code_size
        .type __secondary_boot_code_size, %object
        /* Secondary Boot Code ends here */
__secondary_boot_code_size:
        .quad .-secondary_boot_code
#endif