start.S
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#if defined(LINUX_KERNEL_IMAGE_HEADER)
#include <asm/boot0-linux-kernel-header.h>
#elif defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK)
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif
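
/*
 * Entry notes: with LINUX_KERNEL_IMAGE_HEADER the header included above
 * places a Linux arm64 "Image"-style header at _start, so loaders that
 * expect a kernel image can start U-Boot directly; with the boot0 hook
 * an SoC supplies whatever magic first bytes it needs; otherwise the
 * code simply branches to reset.
 */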
	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#if CONFIG_POSITION_INDEPENDENT
/*
 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
 * executed at a different address than it was linked at.
 */
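/*
 * Each .rela.dyn entry is an Elf64_Rela record: a 64-bit link-time
 * location, a 64-bit relocation info word and a 64-bit addend (24 bytes
 * in total, matching the ldp/ldr pair in the loop below). Type 1027 is
 * R_AARCH64_RELATIVE: the fixed-up value is the addend plus the
 * run-vs-link offset, stored back at the offset-adjusted location.
 */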
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	sub	x9, x0, x1		/* x9 <- Run-vs-link offset */
	adr	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
	adr	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* relative fixup? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif

#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif

/*
 * Could be EL3/EL2/EL1, Initial State:
 * Little Endian, MMU Disabled, i/dCache Disabled
 */
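/*
 * Per-EL setup below: install the exception vector base in VBAR_ELx and
 * make sure FP/SIMD is usable (CPTR_EL3/CPTR_EL2 traps cleared, or
 * CPACR_EL1.FPEN set). At EL3, SCR_EL3.NS/IRQ/FIQ/EA are also set so
 * lower exception levels run non-secure and external aborts, IRQs and
 * FIQs are routed to EL3, and CNTFRQ_EL0 is programmed if
 * COUNTER_FREQUENCY is defined.
 */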
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr		/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:

/*
 * Enable SMPEN bit for coherency.
 * This register is not architectural but at the moment
 * this bit should be set for A53/A57/A72.
 */
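/*
 * CPUECTLR_EL1 has no architectural name, hence the S3_1_c15_c2_1
 * encoding below; bit 6 (the 0x40) is SMPEN, which enables hardware
 * coherency with the other cores in the cluster on these parts.
 */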
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs	x0, S3_1_c15_c2_1	/* cpuectlr_el1 */
	orr	x0, x0, #0x40
	msr	S3_1_c15_c2_1, x0
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before being enabled in icache_enable()
	 * tlb is invalidated before the mmu is enabled in dcache_enable()
	 * d-cache is invalidated before being enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
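	/*
	 * Each secondary core parks in the wfe loop below until the
	 * master publishes a non-zero entry address at CPU_RELEASE_ADDR
	 * and wakes it (for instance via the SGI sent by
	 * smp_kick_all_cpus further down); it then branches to that
	 * address.
	 */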
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */

master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
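/*
 * reset_sctrl: clear SCTLR_ELx.M, C and EE at the current exception
 * level (MMU off, D-cache off, little-endian exception handling), then
 * invalidate the TLBs via __asm_invalidate_tlb_all. The 0xfdfffffa AND
 * mask clears exactly bits 0 (M), 2 (C) and 25 (EE).
 */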
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1
0:
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0
7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A53, Cortex-A57 specific errata */

	/* Check if we are running on a Cortex-A53 core */
	branch_if_a53_core x0, apply_a53_core_errata

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a53_core_errata:

#ifdef CONFIG_ARM_ERRATA_855873
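	/*
	 * The MIDR checks below gate the workaround: it is only applied
	 * when the variant field (MIDR[23:20]) is 0 and the revision
	 * field (MIDR[3:0]) is 3 or higher, i.e. on Cortex-A53 r0p3 or
	 * later.
	 */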
	mrs	x0, midr_el1
	tst	x0, #(0xf << 20)
	b.ne	0b
	mrs	x0, midr_el1
	and	x0, x0, #0xf
	cmp	x0, #3
	b.lt	0b

	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Enable data cache clean as data cache clean/invalidate */
	orr	x0, x0, #1 << 44
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Disabling the Indirect Predictor bit will prevent this erratum
	 * from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable Enable Invalidates of BTB bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/
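/*
 * lowlevel_init (weak, so boards/SoCs may override it): with a GICv2 or
 * GICv3 configured it performs the secure GIC distributor and per-CPU
 * interface setup; with CONFIG_ARMV8_MULTIENTRY it additionally holds
 * the slave cores until the master has set up the spin table, then
 * drops them to EL2 (and optionally EL1) before returning.
 */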
WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish setting up the
	 * spin table. This sync prevents slaves from observing a stale
	 * spin table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
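	/*
	 * armv8_switch_to_el2 takes the address to continue at in x4 and
	 * the requested execution state in x5 (ES_TO_AARCH64 stays in
	 * AArch64), so each slave resumes at lowlevel_in_el2 once the
	 * switch has happened; the EL1 switch below works the same way.
	 */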
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/
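/*
 * c_runtime_cpu_setup: called once U-Boot has relocated itself; the
 * vector table has moved with the rest of the image, so VBAR_ELx must
 * be pointed at the relocated vectors for the current exception level.
 */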
ENTRY(c_runtime_cpu_setup)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)