/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x) \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_irq_save\n" \
                " orr %1, %0, #128\n" \
                " msr cpsr_c, %1" \
                : "=r" (x), "=r" (temp) \
                : \
                : "memory"); \
        })

/*
 * Enable IRQs
 */
#define local_irq_enable() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_irq_enable\n" \
                " bic %0, %0, #128\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })

/*
 * Disable IRQs
 */
#define local_irq_disable() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_irq_disable\n" \
                " orr %0, %0, #128\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })

/*
 * Enable FIQs
 */
#define __stf() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ stf\n" \
                " bic %0, %0, #64\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })

/*
 * Disable FIQs
 */
#define __clf() \
        ({ \
                unsigned long temp; \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ clf\n" \
                " orr %0, %0, #64\n" \
                " msr cpsr_c, %0" \
                : "=r" (temp) \
                : \
                : "memory"); \
        })

/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x) \
        ({ \
                __asm__ __volatile__( \
                "mrs %0, cpsr @ local_save_flags\n" \
                : "=r" (x) \
                : \
                : "memory"); \
        })

/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x) \
        __asm__ __volatile__( \
        "msr cpsr_c, %0 @ local_irq_restore\n" \
        : \
        : "r" (x) \
        : "memory")
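
/*
 * A minimal usage sketch of the save/restore pair above, assuming a
 * hypothetical shared_counter that must not be touched by an IRQ
 * handler mid-update.  The pair nests safely because the caller keeps
 * the old CPSR value rather than assuming IRQs were enabled:
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);          // mask IRQs, old CPSR into flags
 *      shared_counter++;               // hypothetical protected data
 *      local_irq_restore(flags);       // put the previous state back
 */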

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *      1. Disable interrupts and emulate the atomic swap
 *      2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        extern void __bad_xchg(volatile void *, int);
        unsigned long ret;
#ifdef swp_is_buggy
        unsigned long flags;
#endif

        switch (size) {
#ifdef swp_is_buggy
        case 1:
                local_irq_save(flags);
                ret = *(volatile unsigned char *)ptr;
                *(volatile unsigned char *)ptr = x;
                local_irq_restore(flags);
                break;
        case 4:
                local_irq_save(flags);
                ret = *(volatile unsigned long *)ptr;
                *(volatile unsigned long *)ptr = x;
                local_irq_restore(flags);
                break;
#else
        case 1:
                __asm__ __volatile__ ("swpb %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory");
                break;
        case 4:
                __asm__ __volatile__ ("swp %0, %1, [%2]"
                        : "=&r" (ret)
                        : "r" (x), "r" (ptr)
                        : "memory");
                break;
#endif
        default:
                __bad_xchg(ptr, size), ret = 0;
        }

        return ret;
}

#endif /* __ASM_PROC_SYSTEM_H */
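
/*
 * Callers do not normally invoke __xchg() directly; in kernels of this
 * vintage the public entry point is an xchg() macro in the companion
 * <asm-arm/system.h>.  A sketch of that usual pattern (reconstructed,
 * not copied from this file):
 *
 *      #define xchg(ptr, x) \
 *              ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
 *                                          sizeof(*(ptr))))
 *
 * sizeof(*(ptr)) selects the byte or word case above at compile time;
 * any other size reaches __bad_xchg(), which is declared but never
 * defined, so an unsupported call fails at link time.
 */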