/* lowlevel.S */
/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
ENTRY(lowlevel_init)
	/*
	 * One-time secure-world platform init, run on every core.
	 * Clobbers x0-x2 (plus whatever the called helpers clobber);
	 * preserves LR in x29.
	 */
	mov	x29, lr			/* Save LR */

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Add fully-coherent masters (the RN-F nodes listed in
	 * CCI_MN_RNF_NODEID_LIST) to the CCN-504 DVM domain, so that
	 * broadcast TLB/maintenance messages reach them.
	 */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/*
	 * Set all RN-I ports to QoS of 15.
	 * For each stanza: x0 = QoS control register of one device slot
	 * (S0/S1/S2) on the given crosspoint port, x1 = value written.
	 * The meaning of 0x00FF000C's fields is defined in the CCN-504
	 * TRM (QoS override/control) -- not derivable from this file.
	 * Ports touched: 0, 2, 6, 12, 16, 20.
	 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif /* CONFIG_FSL_LSCH3 */

	/* Set the SMMU page size in the sACR register (offset 0x10) */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16 /* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Only the boot (master) core initialises the distributor */
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	/* Per-core redistributor init */
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	/* Per-core CPU-interface init */
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

	/* Master continues at 2:; secondaries divert to the spin table */
	branch_if_master x0, x1, 2f
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	/* Secondary cores park here; secondary_boot_func never returns */
	ldr	x0, =secondary_boot_func
	blr	x0
#endif
2:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	/*
	 * NOTE(review): w0 is OR'd without first being loaded from the
	 * register, so it still holds a stale value from the code above.
	 * Presumably tolerated because this is a write-1-to-set register,
	 * but stray set bits could unprotect other devices -- confirm.
	 */
	orr	w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */
	str	w0, [x1]
	isb
	dsb	sy
#endif
#ifdef CONFIG_FSL_TZASC_400
	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * placeholders.
	 */
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	x0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	x0, x0, #1 << 0		/* Set open_request for Filter 0 */
	str	x0, [x1]

	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	x0, [x1]		/* Filter 1 Gate Keeper Register */
	orr	x0, x0, #1 << 0		/* Set open_request for Filter 1 */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	x0, [x1]		/* Region-0 Attributes Register */
	orr	x0, x0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	x0, x0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	x0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	x0, [x1]		/* Region-0 Attributes of instance 1 */
	orr	x0, x0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	x0, x0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	x0, [x1]

	/*
	 * NOTE(review): the two ldr-into-w0 below are dead -- w0 is
	 * unconditionally overwritten by the mov before being stored.
	 */
	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-0 Access of instance 1 */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
	isb
	dsb	sy
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 *
	 * Polls the PSTATE_STATUS register of each of the 8 HN-F nodes
	 * until it equals the requested value or the deadline passes.
	 * On the first node that times out, gives up immediately
	 * (remaining nodes are not checked).
	 */
	mov	x1, x0			/* x1 = status value to wait for */
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds
					 * (1200 ticks -- assumes a 12 MHz
					 * counter; TODO confirm) */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS (0x0420_0018) */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status; full-register compare,
					 * assumes other bits read as zero --
					 * TODO confirm against CCN-504 TRM */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b			/* deadline not reached: poll again */
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node (64 KB stride) */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7			/* 0 = all nodes reached the state */
	ret
hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6
	 *
	 * Read-modify-writes the PSTATE_REQ register of each of the 8
	 * HN-F nodes: clears the low 2-bit p-state field and inserts the
	 * requested state. Does not wait for completion -- callers poll
	 * via hnf_pstate_poll.
	 */
	mov	x1, x0			/* x1 = requested p-state */
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ (0x0420_0010) */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc /* & ~HNFPSTAT_MASK: clear p-state field */
	orr	x2, x2, x1		/* insert requested state */
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node (64 KB stride) */
	subs	w6, w6, #1
	cbnz	w6, 1b
	ret
ENTRY(__asm_flush_l3_cache)
	/*
	 * Flush the CCN-504 L3 cache by stepping every HN-F node through
	 * the SFONLY p-state and then the FAM p-state, polling for each
	 * transition to complete.
	 *
	 * Return status in x0
	 *	success 0
	 *	timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 * Clobbers x1-x4, x6-x8 (via the hnf_* helpers); saves LR in x29.
	 */
	mov	x29, lr			/* save LR across the bl calls */
	mov	x8, #0			/* accumulated timeout status */
	dsb	sy			/* complete outstanding accesses first */
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2		/* FAM transition timed out too */
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_cache)
#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	/* One SPIN_TABLE_ELEM_SIZE-byte entry per CPU, zero-filled by .space */
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * Entry point for secondary cores: publish this core's presence in
	 * the spin table, wait for the SGI wakeup, drop to EL2 (optionally
	 * EL1), then spin on the table entry until the master writes a
	 * jump address.  Never returns.
	 *
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = AFF1_CLUSTERID * 4 + AFF0_CPUID
	 *      (i.e. cluster id shifted left 2, OR'd with core id)
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1_CLUSTERID */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0_CPUID */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	/* Program the arch timer frequency from the stored value */
	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */

	str	x9, [x11, #16]		/* LPID field of this entry */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS = 1: core is up and waiting */
	dsb	sy			/* make the entry visible before waiting */

#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICC_BASE
	gic_wait_for_interrupt_m x0, w1
#endif

	/* Leave EL3 before parking; the EL1 drop is optional */
	bl	secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	secondary_switch_to_el1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]		/* jump address written by the master */
	cbz	x0, slave_cpu		/* spurious wakeup: keep spinning */
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le	/* SCTLR.EE: big-endian if set */
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	br	x0			/* branch to the given address */
ENDPROC(secondary_boot_func)
ENTRY(secondary_switch_to_el2)
	/* Drop from EL3 to EL2; a no-op (plain ret) if not at EL3 */
	switch_el x0, 1f, 0f, 0f	/* dispatch on current EL */
0:	ret				/* already at EL2/EL1 */
1:	armv8_switch_to_el2_m x0	/* EL3: perform the switch */
ENDPROC(secondary_switch_to_el2)
ENTRY(secondary_switch_to_el1)
	/* Drop from EL2 to EL1; a no-op (plain ret) if not at EL2 */
	switch_el x0, 0f, 1f, 0f	/* dispatch on current EL */
0:	ret				/* at EL3 or already at EL1 */
1:	armv8_switch_to_el1_m x0, x1	/* EL2: perform the switch */
ENDPROC(secondary_switch_to_el1)
	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region)
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	/* Counter frequency read by secondary_boot_func to set cntfrq_el0 */
	.quad COUNTER_FREQUENCY

	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	/* Byte size of the region to memreserve for secondaries */
	.quad .-secondary_boot_code
#endif /* CONFIG_MP */