/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif

ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifdef CONFIG_FSL_LSCH3
	/* Set Wuo bit for RN-I 20 */
#if defined(CONFIG_LS2085A) || defined(CONFIG_LS2080A)
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm
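	/*
	 * The block below writes the same QoS value (0x00FF000C) to the
	 * S0/S1/S2 QoS control registers of each RN-I node used here
	 * (nodes 0, 2, 6, 12, 16 and 20), one ccn504_set_qos call per
	 * register; only the node number and slave port index change
	 * between the otherwise identical three-instruction groups.
	 */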
	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif
2:

#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE	/* Decode Protection-0 Set Reg */
	orr	w0, w0, #1 << 3			/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif
#ifdef CONFIG_FSL_TZASC_400
	/*
	 * Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * placeholders.
	 */
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	x0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	x0, x0, #1 << 0		/* Set open_request for Filter 0 */
	str	x0, [x1]

	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	x0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	x0, x0, #1 << 0		/* Set open_request for Filter 0 */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	x0, [x1]		/* Region-0 Attributes Register */
	orr	x0, x0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	x0, x0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	x0, [x1]		/* Region-1 Attributes Register */
	orr	x0, x0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	x0, x0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-1 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	isb
	dsb	sy
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

hnf_pstate_poll:
	/*
	 * x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
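	/*
	 * Note: the poll below arms a deadline of current count + 1200
	 * generic-timer ticks; 1200 ticks correspond to the commented
	 * 100 microseconds only for a counter running at 12 MHz, so the
	 * real wall-clock budget scales with the actual counter frequency.
	 */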
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
	ret

ENTRY(__asm_flush_l3_cache)
	/*
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
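	/*
	 * x8 accumulates the return value: bit 0 is set if the SFONLY
	 * poll times out, bit 1 if the FAM poll times out, giving the
	 * 0/1/2/3 status described above.
	 */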
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_cache)

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
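	/*
	 * Per-element layout, as used by secondary_boot_func below:
	 * offset 0 holds the address a waiting secondary eventually
	 * branches to, offset 8 its STATUS word and offset 16 its
	 * identifier taken from MPIDR[15:0]; each element is padded
	 * to 64 bytes.
	 */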

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0]   = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2]   = AFF0_RES
	 * MPIDR[15:8]  = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24]    = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30]    = U
	 * MPIDR[31]    = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
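	/*
	 * Example: MPIDR[15:0] = 0x0101 (cluster 1, core 1) gives
	 * x1 = 1, x2 = 1 and therefore LPID = (1 << 2) | 1 = 5.
	 */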
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0
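	/*
	 * Continuing the example above, LPID 5 selects the element at
	 * offset 5 * 64 = 0x140, so x11 = __spin_table + 0x140.
	 */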
	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICC_BASE
	gic_wait_for_interrupt_m x0, w1
#endif

	bl	secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	secondary_switch_to_el1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	br	x0			/* branch to the given address */
ENDPROC(secondary_boot_func)

ENTRY(secondary_switch_to_el2)
	switch_el x0, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x0
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	switch_el x0, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x0, x1
ENDPROC(secondary_switch_to_el1)

	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region)
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY

	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif