start.S

/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/
.globl _start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
    b   reset
#endif
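
/*
 * Note: without a boot0 hook, execution enters the image at _start
 * (offset 0), so the first instruction is simply a branch to reset.
 */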
    .align 3

.globl _TEXT_BASE
_TEXT_BASE:
    .quad CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl _end_ofs
_end_ofs:
    .quad _end - _start

.globl _bss_start_ofs
_bss_start_ofs:
    .quad __bss_start - _start

.globl _bss_end_ofs
_bss_end_ofs:
    .quad __bss_end - _start
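
/*
 * These offsets are link-time constants relative to _start; the C
 * runtime setup presumably uses them to size the image for relocation
 * and to clear the BSS before entering C code.
 */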
reset:
    /* Allow the board to save important registers */
    b   save_boot_params
.globl save_boot_params_ret
save_boot_params_ret:

#ifdef CONFIG_SYS_RESET_SCTRL
    bl  reset_sctrl
#endif
/*
 * We may be running at EL3, EL2 or EL1. Initial state:
 * little-endian, MMU disabled, I/D caches disabled.
 */
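/*
 * switch_el (from asm/macro.h) reads CurrentEL and branches to the
 * 3f/2f/1f label matching the exception level we booted at.
 */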
    adr x0, vectors
    switch_el x1, 3f, 2f, 1f
3:  msr vbar_el3, x0
    mrs x0, scr_el3
    orr x0, x0, #0xf            /* SCR_EL3.NS|IRQ|FIQ|EA */
    msr scr_el3, x0
    msr cptr_el3, xzr           /* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
    ldr x0, =COUNTER_FREQUENCY
    msr cntfrq_el0, x0          /* Initialize CNTFRQ */
#endif
    b   0f
2:  msr vbar_el2, x0
    mov x0, #0x33ff
    msr cptr_el2, x0            /* Enable FP/SIMD */
    b   0f
1:  msr vbar_el1, x0
    mov x0, #3 << 20
    msr cpacr_el1, x0           /* Enable FP/SIMD */
0:
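
/*
 * The FP/SIMD "enable" values above are untrap encodings: CPTR_EL3 = 0
 * clears TFP; 0x33ff keeps CPTR_EL2's RES1 bits set with TFP (bit 10)
 * clear; #3 << 20 sets CPACR_EL1.FPEN = 0b11 (no trapping).
 */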
/*
 * Enable the SMPEN bit for coherency.
 * This register is not architectural, but at the moment
 * this bit should be set for A53/A57/A72.
 */
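/* SMPEN is bit 6 (0x40) of CPUECTLR_EL1, accessed here by its raw
 * system-register encoding S3_1_c15_c2_1. */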
#ifdef CONFIG_ARMV8_SET_SMPEN
    switch_el x1, 3f, 1f, 1f
3:
    mrs x0, S3_1_c15_c2_1       /* cpuectlr_el1 */
    orr x0, x0, #0x40
    msr S3_1_c15_c2_1, x0
1:
#endif
    /* Apply ARM core specific errata */
    bl  apply_core_errata

/*
 * Cache/BPB/TLB invalidate:
 * the i-cache is invalidated before being enabled in icache_enable(),
 * the TLB is invalidated before the MMU is enabled in dcache_enable(),
 * the d-cache is invalidated before being enabled in dcache_enable().
 */

    /* Processor specific initialization */
    bl  lowlevel_init
#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
    branch_if_master x0, x1, master_cpu
    b   spin_table_secondary_jump
    /* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
    branch_if_master x0, x1, master_cpu

/*
 * Slave CPUs
 */
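/*
 * Spin-table handshake: each secondary parks in wfe until a non-zero
 * entry address appears at CPU_RELEASE_ADDR (the waker presumably
 * issues sev after writing it); zero means "keep waiting".
 */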
slave_cpu:
    wfe
    ldr x1, =CPU_RELEASE_ADDR
    ldr x0, [x1]
    cbz x0, slave_cpu
    br  x0                      /* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
    bl  _main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
    switch_el x1, 3f, 2f, 1f
3:
    mrs x0, sctlr_el3
    b   0f
2:
    mrs x0, sctlr_el2
    b   0f
1:
    mrs x0, sctlr_el1
0:
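/*
 * The mask 0xfdfffffa clears SCTLR.M (bit 0), SCTLR.C (bit 2) and
 * SCTLR.EE (bit 25): MMU off, D-cache off, little-endian exceptions.
 */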
    ldr x1, =0xfdfffffa
    and x0, x0, x1

    switch_el x1, 6f, 5f, 4f
6:
    msr sctlr_el3, x0
    b   7f
5:
    msr sctlr_el2, x0
    b   7f
4:
    msr sctlr_el1, x0
7:
    dsb sy
    isb
    b   __asm_invalidate_tlb_all    /* tail call; returns via our caller's lr */
    ret                             /* not reached */
#endif
/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

    mov x29, lr                 /* Save LR */
    /* For now, we support Cortex-A57 specific errata only */

    /* Check if we are running on a Cortex-A57 core */
    branch_if_a57_core x0, apply_a57_core_errata
0:
    mov lr, x29                 /* Restore LR */
    ret
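
/*
 * CPUACTLR_EL1 is the implementation-defined Auxiliary Control Register
 * on Cortex-A57, written below via its raw encoding S3_1_c15_c2_0,
 * presumably because not all assemblers know it by name.
 */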
apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
    mrs x0, S3_1_c15_c2_0       /* cpuactlr_el1 */
    /* Disable non-allocate hint of w-b-n-a memory type */
    orr x0, x0, #1 << 49
    /* Disable write streaming no L1-allocate threshold */
    orr x0, x0, #3 << 25
    /* Disable write streaming no-allocate threshold */
    orr x0, x0, #3 << 27
    msr S3_1_c15_c2_0, x0       /* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
    mrs x0, S3_1_c15_c2_0       /* cpuactlr_el1 */
    /* Disable speculative load execution ahead of a DMB */
    orr x0, x0, #1 << 59
    msr S3_1_c15_c2_0, x0       /* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
    mrs x0, S3_1_c15_c2_0       /* cpuactlr_el1 */
    /*
     * Force FPSCR write flush. Note that in some cases where a
     * flush is unnecessary this could impact performance.
     */
    orr x0, x0, #1 << 38
    msr S3_1_c15_c2_0, x0       /* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
    mrs x0, S3_1_c15_c2_0       /* cpuactlr_el1 */
    /*
     * Setting the Disable Indirect Predictor bit prevents this
     * erratum from occurring. Note that in some cases where a
     * flush is unnecessary this could impact performance.
     */
    orr x0, x0, #1 << 4
    msr S3_1_c15_c2_0, x0       /* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
    mrs x0, S3_1_c15_c2_0       /* cpuactlr_el1 */
    /* Clear the "Enable Invalidates of BTB" bit */
    and x0, x0, #0xE
    msr S3_1_c15_c2_0, x0       /* cpuactlr_el1 */
#endif
    b   0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
    mov x29, lr                 /* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
    branch_if_slave x0, 1f
    ldr x0, =GICD_BASE
    bl  gic_init_secure
1:
#if defined(CONFIG_GICV3)
    ldr x0, =GICR_BASE
    bl  gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
    ldr x0, =GICD_BASE
    ldr x1, =GICC_BASE
    bl  gic_init_secure_percpu
#endif
#endif
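
/*
 * GICD_BASE is the shared distributor, initialized by the master only;
 * the per-CPU init uses the GICv3 redistributor (GICR_BASE) or the
 * GICv2 CPU interface (GICC_BASE). The base addresses come from the
 * board configuration.
 */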
#ifdef CONFIG_ARMV8_MULTIENTRY
    branch_if_master x0, x1, 2f

    /*
     * A slave should wait for the master to finish clearing the spin
     * table. This sync prevents slaves from observing a stale
     * spin-table value and jumping to the wrong place.
     */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
    ldr x0, =GICC_BASE
#endif
    bl  gic_wait_for_interrupt
#endif

    /*
     * All slaves will enter EL2 and optionally EL1.
     */
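    /*
     * armv8_switch_to_el2 appears to take the resume address in x4 and
     * the target execution state in x5 (ES_TO_AARCH64 here); it drops
     * one exception level and continues at that address.
     */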
    adr x4, lowlevel_in_el2
    ldr x5, =ES_TO_AARCH64
    bl  armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
    adr x4, lowlevel_in_el1
    ldr x5, =ES_TO_AARCH64
    bl  armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
    mov lr, x29                 /* Restore LR */
    ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
    /* Kick secondary CPUs awake with an SGI 0 interrupt */
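    /*
     * The SGI presumably wakes secondaries parked in
     * gic_wait_for_interrupt (see lowlevel_init above).
     */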
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
    ldr x0, =GICD_BASE
    b   gic_kick_secondary_cpus
#endif
    ret
ENDPROC(smp_kick_all_cpus)
/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
    /* Relocate VBAR: point it at the (possibly relocated) vector table */
    adr x0, vectors
    switch_el x1, 3f, 2f, 1f
3:  msr vbar_el3, x0
    b   0f
2:  msr vbar_el2, x0
    b   0f
1:  msr vbar_el1, x0
0:
    ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
    b   save_boot_params_ret    /* back to my caller */
ENDPROC(save_boot_params)