start.S

/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>
/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/
.globl	_start
_start:
	b	reset

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start
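/*
 * The offsets above let the relocation code compute run-time addresses
 * relative to wherever _start actually executes, independent of the
 * link-time base.
 */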
reset:
#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * We could be entered at EL3, EL2 or EL1. Initial state:
	 * little-endian, MMU disabled, I-cache and D-cache disabled.
	 */
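	/*
	 * Install the exception vectors for whichever EL we entered at.
	 * The switch_el macro (asm/macro.h) reads CurrentEL into its
	 * scratch register and branches to the label matching that level.
	 */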
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
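	/*
	 * SCR_EL3: set NS (bit 0) so lower exception levels are
	 * non-secure, and IRQ/FIQ/EA (bits 1-3) to route physical
	 * IRQ, FIQ and SError to EL3.
	 */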
	mrs	x0, scr_el3
	orr	x0, x0, #0xf		/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr		/* Enable FP/SIMD */
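	/*
	 * CNTFRQ_EL0 is writable only from the highest implemented
	 * exception level, hence it is programmed in the EL3 path only.
	 */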
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
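	/*
	 * 0x33ff sets the CPTR_EL2 RES1 bits ([13:12] and [9:0]) while
	 * leaving TFP (bit 10) clear, so FP/SIMD accesses are not
	 * trapped to EL2.
	 */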
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
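	/*
	 * CPACR_EL1.FPEN (bits [21:20]) = 0b11: FP/SIMD instructions
	 * are not trapped at EL0 or EL1.
	 */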
	mov	x0, #3 << 20
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:
	/* Apply ARM core specific errata */
	bl	apply_core_errata
	/*
	 * Cache/BPB/TLB invalidation:
	 * the I-cache is invalidated before being enabled in icache_enable(),
	 * the TLBs are invalidated before the MMU is enabled in dcache_enable(),
	 * and the D-cache is invalidated before being enabled in dcache_enable().
	 */

	/* Processor specific initialization */
	bl	lowlevel_init
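	/*
	 * Both apply_core_errata and lowlevel_init are declared WEAK
	 * below, so board or SoC code may override them.
	 */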
#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
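	/*
	 * The loop above implements the spin-table handoff: each
	 * secondary CPU sleeps in wfe and re-reads CPU_RELEASE_ADDR
	 * after every wakeup; a zero value means keep waiting, and a
	 * non-zero value is the entry point published for it.
	 */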
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

	bl	_main
#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1
0:
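	/*
	 * The mask 0xfdfffffa clears SCTLR_ELx.M (bit 0, MMU enable),
	 * SCTLR_ELx.C (bit 2, D-cache enable) and SCTLR_ELx.EE (bit 25,
	 * exception endianness), leaving all other bits untouched.
	 */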
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1
	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0
7:
	dsb	sy
	isb
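	/*
	 * Tail call: __asm_invalidate_tlb_all returns to our caller
	 * through the still-intact LR, so the ret below is never
	 * reached.
	 */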
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/
WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret
apply_a57_core_errata:
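	/*
	 * S3_1_c15_c2_0 is the system-register encoding of the Cortex-A57
	 * IMPLEMENTATION DEFINED CPUACTLR_EL1 register.
	 */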
#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)
/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifndef CONFIG_ARMV8_MULTIENTRY
	/*
	 * For single-entry systems the lowlevel init is very simple.
	 */
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
#else /* CONFIG_ARMV8_MULTIENTRY is set */
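	/*
	 * GIC init: the master CPU sets up the shared distributor once;
	 * every CPU then initializes its own banked interface (the
	 * redistributor on GICv3, the CPU interface on GICv2).
	 */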
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the spin
	 * table. This sync prevents slaves from observing a stale
	 * spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif
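	/*
	 * The slaves sleep in gic_wait_for_interrupt() until
	 * smp_kick_all_cpus() below wakes them with SGI 0.
	 */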
	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */
2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
WEAK(smp_kick_all_cpus)
	/* Wake the secondary CPUs by raising SGI 0 */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)
/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/*
	 * Point VBAR_ELx at the relocated vector table; adr yields the
	 * run-time (post-relocation) address of vectors.
	 */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:
	ret
ENDPROC(c_runtime_cpu_setup)