/* start.S — AArch64 (ARMv8) U-Boot startup code (reset vector and low-level init) */
  1. /*
  2. * (C) Copyright 2013
  3. * David Feng <fenghua@phytium.com.cn>
  4. *
  5. * SPDX-License-Identifier: GPL-2.0+
  6. */
  7. #include <asm-offsets.h>
  8. #include <config.h>
  9. #include <linux/linkage.h>
  10. #include <asm/macro.h>
  11. #include <asm/armv8/mmu.h>
  12. /*************************************************************************
  13. *
  14. * Startup Code (reset vector)
  15. *
  16. *************************************************************************/
  17. .globl _start
  18. _start:
  19. b reset
  20. .align 3
  21. .globl _TEXT_BASE
  22. _TEXT_BASE:
  23. .quad CONFIG_SYS_TEXT_BASE
  24. /*
  25. * These are defined in the linker script.
  26. */
  27. .globl _end_ofs
  28. _end_ofs:
  29. .quad _end - _start
  30. .globl _bss_start_ofs
  31. _bss_start_ofs:
  32. .quad __bss_start - _start
  33. .globl _bss_end_ofs
  34. _bss_end_ofs:
  35. .quad __bss_end - _start
  36. reset:
  37. /*
  38. * Could be EL3/EL2/EL1, Initial State:
  39. * Little Endian, MMU Disabled, i/dCache Disabled
  40. */
  41. adr x0, vectors
  42. switch_el x1, 3f, 2f, 1f
  43. 3: msr vbar_el3, x0
  44. mrs x0, scr_el3
  45. orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
  46. msr scr_el3, x0
  47. msr cptr_el3, xzr /* Enable FP/SIMD */
  48. #ifdef COUNTER_FREQUENCY
  49. ldr x0, =COUNTER_FREQUENCY
  50. msr cntfrq_el0, x0 /* Initialize CNTFRQ */
  51. #endif
  52. b 0f
  53. 2: msr vbar_el2, x0
  54. mov x0, #0x33ff
  55. msr cptr_el2, x0 /* Enable FP/SIMD */
  56. b 0f
  57. 1: msr vbar_el1, x0
  58. mov x0, #3 << 20
  59. msr cpacr_el1, x0 /* Enable FP/SIMD */
  60. 0:
  61. /* Apply ARM core specific erratas */
  62. bl apply_core_errata
  63. /*
  64. * Cache/BPB/TLB Invalidate
  65. * i-cache is invalidated before enabled in icache_enable()
  66. * tlb is invalidated before mmu is enabled in dcache_enable()
  67. * d-cache is invalidated before enabled in dcache_enable()
  68. */
  69. /* Processor specific initialization */
  70. bl lowlevel_init
  71. #ifdef CONFIG_ARMV8_MULTIENTRY
  72. branch_if_master x0, x1, master_cpu
  73. /*
  74. * Slave CPUs
  75. */
  76. slave_cpu:
  77. wfe
  78. ldr x1, =CPU_RELEASE_ADDR
  79. ldr x0, [x1]
  80. cbz x0, slave_cpu
  81. br x0 /* branch to the given address */
  82. master_cpu:
  83. /* On the master CPU */
  84. #endif /* CONFIG_ARMV8_MULTIENTRY */
  85. bl _main
  86. /*-----------------------------------------------------------------------*/
  87. WEAK(apply_core_errata)
  88. mov x29, lr /* Save LR */
  89. /* For now, we support Cortex-A57 specific errata only */
  90. /* Check if we are running on a Cortex-A57 core */
  91. branch_if_a57_core x0, apply_a57_core_errata
  92. 0:
  93. mov lr, x29 /* Restore LR */
  94. ret
  95. apply_a57_core_errata:
  96. #ifdef CONFIG_ARM_ERRATA_828024
  97. mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */
  98. /* Disable non-allocate hint of w-b-n-a memory type */
  99. orr x0, x0, #1 << 49
  100. /* Disable write streaming no L1-allocate threshold */
  101. orr x0, x0, #3 << 25
  102. /* Disable write streaming no-allocate threshold */
  103. orr x0, x0, #3 << 27
  104. msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */
  105. #endif
  106. #ifdef CONFIG_ARM_ERRATA_826974
  107. mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */
  108. /* Disable speculative load execution ahead of a DMB */
  109. orr x0, x0, #1 << 59
  110. msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */
  111. #endif
  112. #ifdef CONFIG_ARM_ERRATA_833069
  113. mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */
  114. /* Disable Enable Invalidates of BTB bit */
  115. and x0, x0, #0xE
  116. msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */
  117. #endif
  118. b 0b
  119. ENDPROC(apply_core_errata)
  120. /*-----------------------------------------------------------------------*/
  121. WEAK(lowlevel_init)
  122. mov x29, lr /* Save LR */
  123. #ifndef CONFIG_ARMV8_MULTIENTRY
  124. /*
  125. * For single-entry systems the lowlevel init is very simple.
  126. */
  127. ldr x0, =GICD_BASE
  128. bl gic_init_secure
  129. #else /* CONFIG_ARMV8_MULTIENTRY is set */
  130. #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
  131. branch_if_slave x0, 1f
  132. ldr x0, =GICD_BASE
  133. bl gic_init_secure
  134. 1:
  135. #if defined(CONFIG_GICV3)
  136. ldr x0, =GICR_BASE
  137. bl gic_init_secure_percpu
  138. #elif defined(CONFIG_GICV2)
  139. ldr x0, =GICD_BASE
  140. ldr x1, =GICC_BASE
  141. bl gic_init_secure_percpu
  142. #endif
  143. #endif
  144. branch_if_master x0, x1, 2f
  145. /*
  146. * Slave should wait for master clearing spin table.
  147. * This sync prevent salves observing incorrect
  148. * value of spin table and jumping to wrong place.
  149. */
  150. #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
  151. #ifdef CONFIG_GICV2
  152. ldr x0, =GICC_BASE
  153. #endif
  154. bl gic_wait_for_interrupt
  155. #endif
  156. /*
  157. * All slaves will enter EL2 and optionally EL1.
  158. */
  159. bl armv8_switch_to_el2
  160. #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
  161. bl armv8_switch_to_el1
  162. #endif
  163. #endif /* CONFIG_ARMV8_MULTIENTRY */
  164. 2:
  165. mov lr, x29 /* Restore LR */
  166. ret
  167. ENDPROC(lowlevel_init)
  168. WEAK(smp_kick_all_cpus)
  169. /* Kick secondary cpus up by SGI 0 interrupt */
  170. mov x29, lr /* Save LR */
  171. #if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
  172. ldr x0, =GICD_BASE
  173. bl gic_kick_secondary_cpus
  174. #endif
  175. mov lr, x29 /* Restore LR */
  176. ret
  177. ENDPROC(smp_kick_all_cpus)
  178. /*-----------------------------------------------------------------------*/
  179. ENTRY(c_runtime_cpu_setup)
  180. /* Relocate vBAR */
  181. adr x0, vectors
  182. switch_el x1, 3f, 2f, 1f
  183. 3: msr vbar_el3, x0
  184. b 0f
  185. 2: msr vbar_el2, x0
  186. b 0f
  187. 1: msr vbar_el1, x0
  188. 0:
  189. ret
  190. ENDPROC(c_runtime_cpu_setup)