/* nonsec_virt.S */
/*
 * code for switching cores into non-secure state and into HYP mode
 *
 * Copyright (c) 2013 Andre Przywara <andre.przywara@linaro.org>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/armv7.h>
#include <asm/proc-armv/ptrace.h>
  13. .arch_extension sec
  14. .arch_extension virt
  15. .pushsection ._secure.text, "ax"
  16. .align 5
  17. /* the vector table for secure state and HYP mode */
  18. _monitor_vectors:
  19. .word 0 /* reset */
  20. .word 0 /* undef */
  21. adr pc, _secure_monitor
  22. .word 0
  23. .word 0
  24. .word 0
  25. .word 0
  26. .word 0
  27. .macro is_cpu_virt_capable tmp
  28. mrc p15, 0, \tmp, c0, c1, 1 @ read ID_PFR1
  29. and \tmp, \tmp, #CPUID_ARM_VIRT_MASK @ mask virtualization bits
  30. cmp \tmp, #(1 << CPUID_ARM_VIRT_SHIFT)
  31. .endm
  32. /*
  33. * secure monitor handler
  34. * U-Boot calls this "software interrupt" in start.S
  35. * This is executed on a "smc" instruction, we use a "smc #0" to switch
  36. * to non-secure state.
  37. * r0, r1, r2: passed to the callee
  38. * ip: target PC
  39. */
  40. _secure_monitor:
  41. #ifdef CONFIG_ARMV7_PSCI
  42. ldr r5, =_psci_vectors @ Switch to the next monitor
  43. mcr p15, 0, r5, c12, c0, 1
  44. isb
  45. @ Obtain a secure stack
  46. bl psci_stack_setup
  47. @ Configure the PSCI backend
  48. push {r0, r1, r2, ip}
  49. bl psci_arch_init
  50. pop {r0, r1, r2, ip}
  51. #endif
  52. #ifdef CONFIG_ARM_ERRATA_773022
  53. mrc p15, 0, r5, c1, c0, 1
  54. orr r5, r5, #(1 << 1)
  55. mcr p15, 0, r5, c1, c0, 1
  56. isb
  57. #endif
  58. #ifdef CONFIG_ARM_ERRATA_774769
  59. mrc p15, 0, r5, c1, c0, 1
  60. orr r5, r5, #(1 << 25)
  61. mcr p15, 0, r5, c1, c0, 1
  62. isb
  63. #endif
  64. mrc p15, 0, r5, c1, c1, 0 @ read SCR
  65. bic r5, r5, #0x4a @ clear IRQ, EA, nET bits
  66. orr r5, r5, #0x31 @ enable NS, AW, FW bits
  67. @ FIQ preserved for secure mode
  68. mov r6, #SVC_MODE @ default mode is SVC
  69. is_cpu_virt_capable r4
  70. #ifdef CONFIG_ARMV7_VIRT
  71. orreq r5, r5, #0x100 @ allow HVC instruction
  72. moveq r6, #HYP_MODE @ Enter the kernel as HYP
  73. #endif
  74. mcr p15, 0, r5, c1, c1, 0 @ write SCR (with NS bit set)
  75. isb
  76. bne 1f
  77. @ Reset CNTVOFF to 0 before leaving monitor mode
  78. mrc p15, 0, r4, c0, c1, 1 @ read ID_PFR1
  79. ands r4, r4, #CPUID_ARM_GENTIMER_MASK @ test arch timer bits
  80. movne r4, #0
  81. mcrrne p15, 4, r4, r4, c14 @ Reset CNTVOFF to zero
  82. 1:
  83. mov lr, ip
  84. mov ip, #(F_BIT | I_BIT | A_BIT) @ Set A, I and F
  85. tst lr, #1 @ Check for Thumb PC
  86. orrne ip, ip, #T_BIT @ Set T if Thumb
  87. orr ip, ip, r6 @ Slot target mode in
  88. msr spsr_cxfs, ip @ Set full SPSR
  89. movs pc, lr @ ERET to non-secure
  90. ENTRY(_do_nonsec_entry)
  91. mov ip, r0
  92. mov r0, r1
  93. mov r1, r2
  94. mov r2, r3
  95. smc #0
  96. ENDPROC(_do_nonsec_entry)
  97. .macro get_cbar_addr addr
  98. #ifdef CONFIG_ARM_GIC_BASE_ADDRESS
  99. ldr \addr, =CONFIG_ARM_GIC_BASE_ADDRESS
  100. #else
  101. mrc p15, 4, \addr, c15, c0, 0 @ read CBAR
  102. bfc \addr, #0, #15 @ clear reserved bits
  103. #endif
  104. .endm
  105. .macro get_gicd_addr addr
  106. get_cbar_addr \addr
  107. add \addr, \addr, #GIC_DIST_OFFSET @ GIC dist i/f offset
  108. .endm
  109. .macro get_gicc_addr addr, tmp
  110. get_cbar_addr \addr
  111. is_cpu_virt_capable \tmp
  112. movne \tmp, #GIC_CPU_OFFSET_A9 @ GIC CPU offset for A9
  113. moveq \tmp, #GIC_CPU_OFFSET_A15 @ GIC CPU offset for A15/A7
  114. add \addr, \addr, \tmp
  115. .endm
  116. #ifndef CONFIG_ARMV7_PSCI
  117. /*
  118. * Secondary CPUs start here and call the code for the core specific parts
  119. * of the non-secure and HYP mode transition. The GIC distributor specific
  120. * code has already been executed by a C function before.
  121. * Then they go back to wfi and wait to be woken up by the kernel again.
  122. */
  123. ENTRY(_smp_pen)
  124. cpsid i
  125. cpsid f
  126. bl _nonsec_init
  127. adr r0, _smp_pen @ do not use this address again
  128. b smp_waitloop @ wait for IPIs, board specific
  129. ENDPROC(_smp_pen)
  130. #endif
  131. /*
  132. * Switch a core to non-secure state.
  133. *
  134. * 1. initialize the GIC per-core interface
  135. * 2. allow coprocessor access in non-secure modes
  136. *
  137. * Called from smp_pen by secondary cores and directly by the BSP.
  138. * Do not assume that the stack is available and only use registers
  139. * r0-r3 and r12.
  140. *
  141. * PERIPHBASE is used to get the GIC address. This could be 40 bits long,
  142. * though, but we check this in C before calling this function.
  143. */
  144. ENTRY(_nonsec_init)
  145. get_gicd_addr r3
  146. mvn r1, #0 @ all bits to 1
  147. str r1, [r3, #GICD_IGROUPRn] @ allow private interrupts
  148. get_gicc_addr r3, r1
  149. mov r1, #3 @ Enable both groups
  150. str r1, [r3, #GICC_CTLR] @ and clear all other bits
  151. mov r1, #0xff
  152. str r1, [r3, #GICC_PMR] @ set priority mask register
  153. mrc p15, 0, r0, c1, c1, 2
  154. movw r1, #0x3fff
  155. movt r1, #0x0004
  156. orr r0, r0, r1
  157. mcr p15, 0, r0, c1, c1, 2 @ NSACR = all copros to non-sec
  158. /* The CNTFRQ register of the generic timer needs to be
  159. * programmed in secure state. Some primary bootloaders / firmware
  160. * omit this, so if the frequency is provided in the configuration,
  161. * we do this here instead.
  162. * But first check if we have the generic timer.
  163. */
  164. #ifdef COUNTER_FREQUENCY
  165. mrc p15, 0, r0, c0, c1, 1 @ read ID_PFR1
  166. and r0, r0, #CPUID_ARM_GENTIMER_MASK @ mask arch timer bits
  167. cmp r0, #(1 << CPUID_ARM_GENTIMER_SHIFT)
  168. ldreq r1, =COUNTER_FREQUENCY
  169. mcreq p15, 0, r1, c14, c0, 0 @ write CNTFRQ
  170. #endif
  171. adr r1, _monitor_vectors
  172. mcr p15, 0, r1, c12, c0, 1 @ set MVBAR to secure vectors
  173. isb
  174. mov r0, r3 @ return GICC address
  175. bx lr
  176. ENDPROC(_nonsec_init)
  177. #ifdef CONFIG_SMP_PEN_ADDR
  178. /* void __weak smp_waitloop(unsigned previous_address); */
  179. ENTRY(smp_waitloop)
  180. wfi
  181. ldr r1, =CONFIG_SMP_PEN_ADDR @ load start address
  182. ldr r1, [r1]
  183. #ifdef CONFIG_PEN_ADDR_BIG_ENDIAN
  184. rev r1, r1
  185. #endif
  186. cmp r0, r1 @ make sure we dont execute this code
  187. beq smp_waitloop @ again (due to a spurious wakeup)
  188. mov r0, r1
  189. b _do_nonsec_entry
  190. ENDPROC(smp_waitloop)
  191. .weak smp_waitloop
  192. #endif
  193. .popsection