psci.S

/*
 * Copyright (C) 2013,2014 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/psci.h>
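
@ Secure-world PSCI backend: everything below lives in the ._secure.text
@ section and runs in the secure world.  The vector table that follows is
@ used as the monitor vector table; only the SMC slot does real work
@ (dispatching PSCI calls), FIQ has a weak hook a board can override, and
@ every other exception simply returns to the caller.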

	.pushsection ._secure.text, "ax"

	.arch_extension sec

	.align	5
	.globl	_psci_vectors
_psci_vectors:
	b	default_psci_vector	@ reset
	b	default_psci_vector	@ undef
	b	_smc_psci		@ smc
	b	default_psci_vector	@ pabort
	b	default_psci_vector	@ dabort
	b	default_psci_vector	@ hyp
	b	default_psci_vector	@ irq
	b	psci_fiq_enter		@ fiq
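
@ Default handlers.  Both are weak so a board can supply its own versions.
@ "movs pc, lr" is an exception return: it restores CPSR from the banked
@ SPSR before jumping back to the interrupted code.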
ENTRY(psci_fiq_enter)
	movs	pc, lr
ENDPROC(psci_fiq_enter)
.weak psci_fiq_enter

ENTRY(default_psci_vector)
	movs	pc, lr
ENDPROC(default_psci_vector)
.weak default_psci_vector
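
@ Default implementations of the PSCI functions.  They share one body that
@ returns ARM_PSCI_RET_NI (Not Implemented); a platform provides real
@ behaviour by defining non-weak functions with the same names.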
ENTRY(psci_cpu_suspend)
ENTRY(psci_cpu_off)
ENTRY(psci_cpu_on)
ENTRY(psci_migrate)
	mov	r0, #ARM_PSCI_RET_NI	@ Return -1 (Not Implemented)
	mov	pc, lr
ENDPROC(psci_migrate)
ENDPROC(psci_cpu_on)
ENDPROC(psci_cpu_off)
ENDPROC(psci_cpu_suspend)
.weak psci_cpu_suspend
.weak psci_cpu_off
.weak psci_cpu_on
.weak psci_migrate
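
@ Dispatch table used by _smc_psci: pairs of (PSCI function ID, handler
@ address), terminated by an all-zero entry.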
_psci_table:
	.word	ARM_PSCI_FN_CPU_SUSPEND
	.word	psci_cpu_suspend
	.word	ARM_PSCI_FN_CPU_OFF
	.word	psci_cpu_off
	.word	ARM_PSCI_FN_CPU_ON
	.word	psci_cpu_on
	.word	ARM_PSCI_FN_MIGRATE
	.word	psci_migrate
	.word	0
	.word	0
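
@ SMC handler.  On entry r0 holds the PSCI function ID and r1-r3 carry its
@ arguments.  We save the caller's SCR in r7 and clear SCR.NS to access
@ the secure copies of the banked registers, walk _psci_table for a
@ matching ID, call the handler (its result stays in r0), then restore
@ SCR and perform an exception return back to the non-secure caller.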
_smc_psci:
	push	{r4-r7, lr}

	@ Switch to secure
	mrc	p15, 0, r7, c1, c1, 0
	bic	r4, r7, #1
	mcr	p15, 0, r4, c1, c1, 0
	isb

	adr	r4, _psci_table
1:	ldr	r5, [r4]		@ Load PSCI function ID
	ldr	r6, [r4, #4]		@ Load target PC
	cmp	r5, #0			@ If we reach the end, bail out
	moveq	r0, #ARM_PSCI_RET_INVAL	@ Return -2 (Invalid)
	beq	2f
	cmp	r0, r5			@ If not matching, try next entry
	addne	r4, r4, #8
	bne	1b

	blx	r6			@ Execute PSCI function

	@ Switch back to non-secure
2:	mcr	p15, 0, r7, c1, c1, 0

	pop	{r4-r7, lr}
	movs	pc, lr			@ Return to the kernel
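
@ psci_get_cpu_id derives a linear CPU index from MPIDR affinity level 0.
@ It is weak so that platforms whose MPIDR layout does not match this
@ assumption (e.g. multi-cluster SoCs) can provide their own mapping.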
@ Requires dense and single-cluster CPU ID space
ENTRY(psci_get_cpu_id)
	mrc	p15, 0, r0, c0, c0, 5	/* read MPIDR */
	and	r0, r0, #0xff		/* return CPU ID in cluster */
	bx	lr
ENDPROC(psci_get_cpu_id)
.weak psci_get_cpu_id

/* Imported from Linux kernel */
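@ v7_flush_dcache_all walks CLIDR from level 0 up to the Level of Coherency
@ and, for every data/unified cache it finds, issues DCCISW (clean and
@ invalidate by set/way) over all the sets and ways described by the
@ corresponding CCSIDR.  Clobbers r0-r5, r7 and r9-r11; no stack is used.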
LENTRY(v7_flush_dcache_all)
	dmb				@ ensure ordering with previous memory accesses
	mrc	p15, 1, r0, c0, c0, 1	@ read clidr
	ands	r3, r0, #0x7000000	@ extract loc from clidr
	mov	r3, r3, lsr #23		@ left align loc bit field
	beq	finished		@ if loc is 0, then no need to clean
	mov	r10, #0			@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1	@ work out 3x current cache level
	mov	r1, r0, lsr r2		@ extract cache type bits from clidr
	and	r1, r1, #7		@ mask off the bits for current cache only
	cmp	r1, #2			@ see what cache we have at this level
	blt	skip			@ skip if no cache, or just i-cache
	mrs	r9, cpsr		@ make cssr&csidr read atomic
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
	isb				@ isb to sync the new cssr&csidr
	mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
	msr	cpsr_c, r9
	and	r2, r1, #7		@ extract the length of the cache lines
	add	r2, r2, #4		@ add 4 (line length offset)
	ldr	r4, =0x3ff
	ands	r4, r4, r1, lsr #3	@ find maximum number of the way size
	clz	r5, r4			@ find bit position of way size increment
	ldr	r7, =0x7fff
	ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop1:
	mov	r9, r7			@ create working copy of max index
loop2:
	orr	r11, r10, r4, lsl r5	@ factor way and cache number into r11
	orr	r11, r11, r9, lsl r2	@ factor index number into r11
	mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
	subs	r9, r9, #1		@ decrement the index
	bge	loop2
	subs	r4, r4, #1		@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2		@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0			@ switch back to cache level 0
	mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
	dsb	st
	isb
	bx	lr
ENDPROC(v7_flush_dcache_all)
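
@ ACTLR bit 6 is the SMP bit on the Cortex-A7/A15-class cores this code
@ targets: it must be cleared before a core leaves coherency and set again
@ before it rejoins.  Both helpers are weak so boards with a different
@ ACTLR layout can override them.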
ENTRY(psci_disable_smp)
	mrc	p15, 0, r0, c1, c0, 1	@ ACTLR
	bic	r0, r0, #(1 << 6)	@ Clear SMP bit
	mcr	p15, 0, r0, c1, c0, 1	@ ACTLR
	isb
	dsb
	bx	lr
ENDPROC(psci_disable_smp)
.weak psci_disable_smp

ENTRY(psci_enable_smp)
	mrc	p15, 0, r0, c1, c0, 1	@ ACTLR
	orr	r0, r0, #(1 << 6)	@ Set SMP bit
	mcr	p15, 0, r0, c1, c0, 1	@ ACTLR
	isb
	bx	lr
ENDPROC(psci_enable_smp)
.weak psci_enable_smp
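
@ Common CPU_OFF path: disable the data cache, clean and invalidate the
@ whole D-cache hierarchy, then drop out of SMP coherency.  After this the
@ core only performs non-cacheable accesses and is safe to power down.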
ENTRY(psci_cpu_off_common)
	push	{lr}

	mrc	p15, 0, r0, c1, c0, 0	@ SCTLR
	bic	r0, r0, #(1 << 2)	@ Clear C bit
	mcr	p15, 0, r0, c1, c0, 0	@ SCTLR
	isb
	dsb

	bl	v7_flush_dcache_all

	clrex				@ Clear the local exclusive monitor
	bl	psci_disable_smp

	pop	{lr}
	bx	lr
ENDPROC(psci_cpu_off_common)
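
@ Per-CPU secure stacks (as laid out by the code below): 1 KiB per CPU,
@ growing down from a page boundary chosen two pages above psci_text_end.
@ The word at each CPU's stack top is reserved for the target PC that
@ psci_cpu_entry fetches once the CPU has been brought up.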
@ expects CPU ID in r0 and returns stack top in r0
ENTRY(psci_get_cpu_stack_top)
	mov	r5, #0x400		@ 1kB of stack per CPU
	mul	r0, r0, r5

	ldr	r5, =psci_text_end	@ end of monitor text
	add	r5, r5, #0x2000		@ Skip two pages
	lsr	r5, r5, #12		@ Align to start of page
	lsl	r5, r5, #12
	sub	r5, r5, #4		@ reserve 1 word for target PC
	sub	r0, r5, r0		@ here's our stack!

	bx	lr
ENDPROC(psci_get_cpu_stack_top)
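
@ Entry point for a freshly powered-on secondary CPU: rejoin coherency,
@ run the usual non-secure initialisation, read the target PC stored at
@ this CPU's stack top, and jump to it in the non-secure world.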
ENTRY(psci_cpu_entry)
	bl	psci_enable_smp
	bl	_nonsec_init

	bl	psci_get_cpu_id		@ CPU ID => r0
	bl	psci_get_cpu_stack_top	@ stack top => r0
	ldr	r0, [r0]		@ target PC at stack top
	b	_do_nonsec_entry
ENDPROC(psci_cpu_entry)

	.popsection