/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H
/*
 * Save the current interrupt enable state & disable IRQs
 */
#ifdef CONFIG_ARM64

/*
 * Save the current interrupt enable state
 * and disable IRQs/FIQs
 */
#define local_irq_save(flags) \
	({ \
		asm volatile( \
		"mrs	%0, daif\n" \
		"msr	daifset, #3"	/* mask IRQs and FIQs */ \
		: "=r" (flags) \
		: \
		: "memory"); \
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(flags) \
	({ \
		asm volatile( \
		"msr	daif, %0" \
		: \
		: "r" (flags) \
		: "memory"); \
	})

/*
 * Enable IRQs/FIQs
 */
#define local_irq_enable() \
	({ \
		asm volatile( \
		"msr	daifclr, #3" \
		: \
		: \
		: "memory"); \
	})

/*
 * Disable IRQs/FIQs
 */
#define local_irq_disable() \
	({ \
		asm volatile( \
		"msr	daifset, #3" \
		: \
		: \
		: "memory"); \
	})
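
/*
 * Illustrative sketch (added commentary, not in the original header): the
 * `flags' value saved above is the raw DAIF register, in which bit 7 is
 * the IRQ mask and bit 6 the FIQ mask.  A hypothetical helper to test a
 * saved value could therefore look like:
 *
 *	#define irqs_disabled_flags(flags)	(!!((flags) & (1UL << 7)))
 */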
#else	/* CONFIG_ARM64 */

#define local_irq_save(x) \
	({ \
		unsigned long temp; \
		__asm__ __volatile__( \
		"mrs	%0, cpsr	@ local_irq_save\n" \
		"	orr	%1, %0, #128\n" \
		"	msr	cpsr_c, %1" \
		: "=r" (x), "=r" (temp) \
		: \
		: "memory"); \
	})

/*
 * Enable IRQs (clear the CPSR I bit, 0x80)
 */
#define local_irq_enable() \
	({ \
		unsigned long temp; \
		__asm__ __volatile__( \
		"mrs	%0, cpsr	@ local_irq_enable\n" \
		"	bic	%0, %0, #128\n" \
		"	msr	cpsr_c, %0" \
		: "=r" (temp) \
		: \
		: "memory"); \
	})

/*
 * Disable IRQs (set the CPSR I bit, 0x80)
 */
#define local_irq_disable() \
	({ \
		unsigned long temp; \
		__asm__ __volatile__( \
		"mrs	%0, cpsr	@ local_irq_disable\n" \
		"	orr	%0, %0, #128\n" \
		"	msr	cpsr_c, %0" \
		: "=r" (temp) \
		: \
		: "memory"); \
	})

/*
 * Enable FIQs (clear the CPSR F bit, 0x40)
 */
#define __stf() \
	({ \
		unsigned long temp; \
		__asm__ __volatile__( \
		"mrs	%0, cpsr	@ stf\n" \
		"	bic	%0, %0, #64\n" \
		"	msr	cpsr_c, %0" \
		: "=r" (temp) \
		: \
		: "memory"); \
	})

/*
 * Disable FIQs (set the CPSR F bit, 0x40)
 */
#define __clf() \
	({ \
		unsigned long temp; \
		__asm__ __volatile__( \
		"mrs	%0, cpsr	@ clf\n" \
		"	orr	%0, %0, #64\n" \
		"	msr	cpsr_c, %0" \
		: "=r" (temp) \
		: \
		: "memory"); \
	})

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x) \
	({ \
		__asm__ __volatile__( \
		"mrs	%0, cpsr	@ local_save_flags\n" \
		: "=r" (x) \
		: \
		: "memory"); \
	})

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x) \
	__asm__ __volatile__( \
	"msr	cpsr_c, %0	@ local_irq_restore\n" \
	: \
	: "r" (x) \
	: "memory")

#endif /* CONFIG_ARM64 */
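
/*
 * Usage sketch (added commentary, not part of the original file): the
 * canonical pairing for a short critical section with either variant
 * above.  `flags' must be an unsigned long in the caller:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch data shared with interrupt handlers ...
 *	local_irq_restore(flags);
 *
 * The save/restore pair nests safely, unlike a bare
 * local_irq_disable()/local_irq_enable() pair, which is unconditional.
 */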
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
	defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.  (On ARM64 the swp instruction does
 * not exist at all, so the same emulation is used there too.)
 */
#define swp_is_buggy
#endif
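
/*
 * For context (added commentary): "swp" was deprecated from ARMv6 onwards
 * and removed entirely in AArch64; the modern replacement is a
 * load-exclusive/store-exclusive retry loop.  Roughly, a 32-bit exchange
 * on ARMv6+ looks like:
 *
 *	1:	ldrex	r0, [r2]	@ r2 = ptr, read old value
 *		strex	r3, r1, [r2]	@ try to store new value r1
 *		teq	r3, #0		@ did the store succeed?
 *		bne	1b		@ no - someone else wrote [r2], retry
 *
 * This header predates that idiom, so it falls back to masking
 * interrupts instead.
 */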
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:	__asm__ __volatile__ ("swpb %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory");
		break;
	case 4:	__asm__ __volatile__ ("swp %0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory");
		break;
#endif
	default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
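
/*
 * Usage sketch (added commentary): callers normally reach __xchg()
 * through a type-generic wrapper along these lines - this particular
 * definition is the usual Linux idiom, not something defined in this
 * file:
 *
 *	#define xchg(ptr, x) \
 *		((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 *					    sizeof(*(ptr))))
 *
 * so that `old = xchg(&shared_word, 1);' atomically (with respect to
 * this CPU) swaps in the new value and returns the previous one.
 */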
#endif /* __ASM_PROC_SYSTEM_H */