/*
 * include/asm-arm/macro.h
 *
 * Copyright (C) 2009 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ASM_ARM_MACRO_H__
#define __ASM_ARM_MACRO_H__

#ifdef __ASSEMBLY__

/*
 * These macros provide a convenient way to write 8, 16 and 32 bit data
 * to any address.
 * Registers r4 and r5 are used; any data in these registers is
 * overwritten by the macros.
 * The macros are valid for any ARM architecture, but they do not implement
 * any memory barriers, so caution is recommended when using them while the
 * caches are enabled or on a multi-core system.
 */

.macro	write32, addr, data
	ldr	r4, =\addr
	ldr	r5, =\data
	str	r5, [r4]
.endm

.macro	write16, addr, data
	ldr	r4, =\addr
	ldrh	r5, =\data
	strh	r5, [r4]
.endm

.macro	write8, addr, data
	ldr	r4, =\addr
	ldrb	r5, =\data
	strb	r5, [r4]
.endm
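
/*
 * Usage sketch (the MMIO addresses and values below are hypothetical;
 * r4 and r5 are clobbered by each invocation):
 *
 *	write32	0x4800606c, 0x00000002	@ 32-bit store
 *	write16	0x48006070, 0x0100	@ 16-bit store
 *	write8	0x48006074, 0x01	@ 8-bit store
 */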

/*
 * This macro generates a loop that can be used for delays in the code.
 * Register r4 is used; any data in this register is overwritten by the
 * macro.
 * The macro is valid for any ARM architecture. The actual time spent in the
 * loop will vary from CPU to CPU, though.
 */
.macro	wait_timer, time
	ldr	r4, =\time
1:
	nop
	subs	r4, r4, #1
	bcs	1b
.endm
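
/*
 * Usage sketch (the iteration count is hypothetical; the real delay
 * depends on clock speed, pipeline and caches, so calibrate per board):
 *
 *	wait_timer	0x20000		@ busy-wait for ~128K iterations
 */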

#ifdef CONFIG_ARM64

/*
 * Register aliases.
 */
lr	.req	x30

/*
 * Branch according to the current exception level. CurrentEL encodes
 * the level in bits [3:2], so EL3 reads as 0xc, EL2 as 0x8 and EL1 as 0x4.
 */
.macro	switch_el, xreg, el3_label, el2_label, el1_label
	mrs	\xreg, CurrentEL
	cmp	\xreg, 0xc
	b.eq	\el3_label
	cmp	\xreg, 0x8
	b.eq	\el2_label
	cmp	\xreg, 0x4
	b.eq	\el1_label
.endm
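
/*
 * Usage sketch (local labels chosen for illustration; \xreg is clobbered):
 *
 *	switch_el x1, 3f, 2f, 1f
 * 3:	...			@ reached when entered at EL3
 *	b	0f
 * 2:	...			@ reached when entered at EL2
 *	b	0f
 * 1:	...			@ reached when entered at EL1
 * 0:
 */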

/*
 * Branch if the current processor is a Cortex-A57 core. The primary
 * part number is read from MIDR_EL1 bits [15:4].
 */
.macro	branch_if_a57_core, xreg, a57_label
	mrs	\xreg, midr_el1
	lsr	\xreg, \xreg, #4
	and	\xreg, \xreg, #0x00000FFF
	cmp	\xreg, #0xD07		/* Cortex-A57 MPCore processor. */
	b.eq	\a57_label
.endm

/*
 * Branch if the current processor is a Cortex-A53 core.
 */
.macro	branch_if_a53_core, xreg, a53_label
	mrs	\xreg, midr_el1
	lsr	\xreg, \xreg, #4
	and	\xreg, \xreg, #0x00000FFF
	cmp	\xreg, #0xD03		/* Cortex-A53 MPCore processor. */
	b.eq	\a53_label
.endm
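
/*
 * Usage sketch (hypothetical labels; \xreg is clobbered):
 *
 *	branch_if_a57_core x0, setup_a57	@ A57-specific setup/errata
 *	branch_if_a53_core x0, setup_a53	@ A53-specific setup/errata
 */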

/*
 * Branch if the current processor is a slave; the processor whose
 * affinity value is all zeroes is treated as the master.
 */
.macro	branch_if_slave, xreg, slave_label
#ifdef CONFIG_ARMV8_MULTIENTRY
	/* NOTE: MPIDR handling will be erroneous on multi-cluster machines */
	mrs	\xreg, mpidr_el1
	tst	\xreg, #0xff		/* Test Affinity 0 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 1 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #8
	tst	\xreg, #0xff		/* Test Affinity 2 */
	b.ne	\slave_label
	lsr	\xreg, \xreg, #16
	tst	\xreg, #0xff		/* Test Affinity 3 */
	b.ne	\slave_label
#endif
.endm
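
/*
 * Usage sketch (hypothetical label; \xreg is clobbered): park every
 * core whose MPIDR affinity fields are not all zero.
 *
 *	branch_if_slave x0, secondary_park
 */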

/*
 * Branch if the current processor is the master; the processor whose
 * affinity value is all zeroes is treated as the master.
 */
.macro	branch_if_master, xreg1, xreg2, master_label
#ifdef CONFIG_ARMV8_MULTIENTRY
	/* NOTE: MPIDR handling will be erroneous on multi-cluster machines */
	mrs	\xreg1, mpidr_el1
	lsr	\xreg2, \xreg1, #32	/* Aff3 down to bits [7:0] */
	lsl	\xreg1, \xreg1, #40	/* Drop everything above Aff2... */
	lsr	\xreg1, \xreg1, #40	/* ...keeping Aff0/Aff1/Aff2 */
	orr	\xreg1, \xreg1, \xreg2
	cbz	\xreg1, \master_label
#else
	b	\master_label
#endif
.endm
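
/*
 * Usage sketch (hypothetical labels; \xreg1 and \xreg2 are clobbered):
 *
 *	branch_if_master x0, x1, master_cpu	@ boot CPU continues here
 *	branch_if_slave  x0, slave_cpu		@ secondaries wait/park
 */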

/*
 * Switch from EL3 to EL2 (AArch64). Execution continues at the address
 * in lr, in EL2h mode, with SP and VBAR migrated across.
 */
.macro armv8_switch_to_el2_m, xreg1
	/* 64bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
	mov	\xreg1, #0x5b1
	msr	scr_el3, \xreg1
	msr	cptr_el3, xzr		/* Disable coprocessor traps to EL3 */
	mov	\xreg1, #0x33ff
	msr	cptr_el2, \xreg1	/* Disable coprocessor traps to EL2 */

	/* Initialize Generic Timers */
	msr	cntvoff_el2, xzr

	/*
	 * Initialize SCTLR_EL2
	 *
	 * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1
	 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
	 * EE,WXN,I,SA,C,A,M to 0
	 */
	mov	\xreg1, #0x0830
	movk	\xreg1, #0x30C5, lsl #16
	msr	sctlr_el2, \xreg1

	/* Return to the EL2_SP2 mode from EL3 */
	mov	\xreg1, sp
	msr	sp_el2, \xreg1		/* Migrate SP */
	mrs	\xreg1, vbar_el3
	msr	vbar_el2, \xreg1	/* Migrate VBAR */
	mov	\xreg1, #0x3c9
	msr	spsr_el3, \xreg1	/* EL2_SP2 | D | A | I | F */
	msr	elr_el3, lr
	eret
.endm
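
/*
 * Usage sketch (label is hypothetical; \xreg1 is clobbered, and lr must
 * already point at the EL2 entry code, since the eret targets elr_el3 = lr):
 *
 *	adr	lr, run_at_el2
 *	armv8_switch_to_el2_m x0
 * run_at_el2:			@ reached at EL2 via eret
 */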

/*
 * Switch from EL2 to EL1 (AArch64). Execution continues at the address
 * in lr, in EL1h mode, with SP and VBAR migrated across.
 */
.macro armv8_switch_to_el1_m, xreg1, xreg2
	/* Initialize Generic Timers */
	mrs	\xreg1, cnthctl_el2
	orr	\xreg1, \xreg1, #0x3	/* Enable EL1 access to timers */
	msr	cnthctl_el2, \xreg1
	msr	cntvoff_el2, xzr

	/* Initialize MPID/MPIDR registers */
	mrs	\xreg1, midr_el1
	mrs	\xreg2, mpidr_el1
	msr	vpidr_el2, \xreg1
	msr	vmpidr_el2, \xreg2

	/* Disable coprocessor traps */
	mov	\xreg1, #0x33ff
	msr	cptr_el2, \xreg1	/* Disable coprocessor traps to EL2 */
	msr	hstr_el2, xzr		/* Disable coprocessor traps to EL2 */
	mov	\xreg1, #3 << 20
	msr	cpacr_el1, \xreg1	/* Enable FP/SIMD at EL1 */

	/* Initialize HCR_EL2 */
	mov	\xreg1, #(1 << 31)		/* 64bit EL1 */
	orr	\xreg1, \xreg1, #(1 << 29)	/* Disable HVC */
	msr	hcr_el2, \xreg1

	/*
	 * SCTLR_EL1 initialization
	 *
	 * setting RES1 bits (29,28,23,22,20,11) to 1
	 * and RES0 bits (31,30,27,21,17,13,10,6) +
	 * UCI,EE,EOE,WXN,nTWE,nTWI,UCT,DZE,I,UMA,SED,ITD,
	 * CP15BEN,SA0,SA,C,A,M to 0
	 */
	mov	\xreg1, #0x0800
	movk	\xreg1, #0x30d0, lsl #16
	msr	sctlr_el1, \xreg1

	/* Return to the EL1_SP1 mode from EL2 */
	mov	\xreg1, sp
	msr	sp_el1, \xreg1		/* Migrate SP */
	mrs	\xreg1, vbar_el2
	msr	vbar_el1, \xreg1	/* Migrate VBAR */
	mov	\xreg1, #0x3c5
	msr	spsr_el2, \xreg1	/* EL1_SP1 | D | A | I | F */
	msr	elr_el2, lr
	eret
.endm
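
/*
 * Usage sketch (label is hypothetical; \xreg1 and \xreg2 are clobbered,
 * and lr must already point at the EL1 entry code):
 *
 *	adr	lr, run_at_el1
 *	armv8_switch_to_el1_m x0, x1
 * run_at_el1:			@ reached at EL1 via eret
 */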

#if defined(CONFIG_GICV3)
/*
 * Acknowledge and EOI each pending interrupt, sleeping in wfi until an
 * interrupt with ID 0 (SGI 0) arrives.
 */
.macro gic_wait_for_interrupt_m xreg1
0:	wfi
	mrs	\xreg1, ICC_IAR1_EL1
	msr	ICC_EOIR1_EL1, \xreg1
	cbnz	\xreg1, 0b
.endm
#elif defined(CONFIG_GICV2)
.macro gic_wait_for_interrupt_m xreg1, wreg2
0:	wfi
	ldr	\wreg2, [\xreg1, GICC_AIAR]
	str	\wreg2, [\xreg1, GICC_AEOIR]
	and	\wreg2, \wreg2, #0x3ff
	cbnz	\wreg2, 0b
.endm
#endif
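
/*
 * Usage sketch (GICv2 form; GICC_BASE is a hypothetical CPU interface
 * base address, and both registers are clobbered):
 *
 *	ldr	x0, =GICC_BASE
 *	gic_wait_for_interrupt_m x0, w1	@ park until SGI 0 arrives
 */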

#endif /* CONFIG_ARM64 */

#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARM_MACRO_H__ */