io.h

/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef __ASM_ARC_IO_H
#define __ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>

#ifdef CONFIG_ISA_ARCV2
/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
 *
 * Explicit barrier provided by DMB instruction
 *  - Operand supports fine-grained load/store/load+store semantics
 *  - Ensures that memory operations of the selected type issued before the
 *    barrier complete before any subsequent memory operation of the same type
 *  - DMB guarantees SMP as well as local barrier semantics
 *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
 *    UP: barrier(), SMP: smp_*mb == *mb)
 *  - DSYNC provides DMB + completion of cache/BPU maintenance ops, hence it is
 *    not needed in the general case. Plus it only provides a full barrier.
 *
 * A usage sketch follows the definitions below.
 */
#define mb()	asm volatile("dmb 3\n" : : : "memory")
#define rmb()	asm volatile("dmb 1\n" : : : "memory")
#define wmb()	asm volatile("dmb 2\n" : : : "memory")
#else
/*
 * ARCompact based cores (ARC700) only have the SYNC instruction, which is
 * super heavy weight as it flushes the pipeline as well.
 * There are no real SMP implementations of such cores.
 */
#define mb()	asm volatile("sync\n" : : : "memory")
#endif
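
/*
 * Usage sketch (illustrative only; 'buf' is a hypothetical shared structure,
 * not part of this header): a producer publishing data for another observer
 * pairs wmb() with the consumer's rmb():
 *
 *	buf->len = n;		(fill the shared buffer)
 *	wmb();			(order the fill before the flag store)
 *	buf->ready = 1;		(consumer issues rmb() after seeing this)
 */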

#ifdef CONFIG_ISA_ARCV2
#define __iormb()	rmb()
#define __iowmb()	wmb()
#else
#define __iormb()	do { } while (0)
#define __iowmb()	do { } while (0)
#endif

static inline void sync(void)
{
	/* Not yet implemented */
}

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 b;

	__asm__ __volatile__("ldb%U1 %0, %1\n"
			     : "=r" (b)
			     : "m" (*(volatile u8 __force *)addr)
			     : "memory");
	return b;
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 s;

	__asm__ __volatile__("ldw%U1 %0, %1\n"
			     : "=r" (s)
			     : "m" (*(volatile u16 __force *)addr)
			     : "memory");
	return s;
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 w;

	__asm__ __volatile__("ld%U1 %0, %1\n"
			     : "=r" (w)
			     : "m" (*(volatile u32 __force *)addr)
			     : "memory");
	return w;
}

static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stb%U1 %0, %1\n"
			     :
			     : "r" (b), "m" (*(volatile u8 __force *)addr)
			     : "memory");
}

static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
	__asm__ __volatile__("stw%U1 %0, %1\n"
			     :
			     : "r" (s), "m" (*(volatile u16 __force *)addr)
			     : "memory");
}

static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("st%U1 %0, %1\n"
			     :
			     : "r" (w), "m" (*(volatile u32 __force *)addr)
			     : "memory");
}

/*
 * String I/O helpers: transfer a whole buffer to/from a fixed I/O address
 * (e.g. a device FIFO). The .di qualifier makes the device-side access
 * bypass the data cache; .ab post-increments the buffer pointer; the final
 * memory access sits in the delay slot of the loop branch. Note that the
 * asm bodies reference r0-r2 directly rather than the substituted operands,
 * so they rely on the arguments staying in the argument registers.
 * (A usage sketch follows these helpers.)
 */
static inline int __raw_readsb(unsigned int addr, void *data, int bytelen)
{
	__asm__ __volatile__ ("1:ld.di	r8, [r0]\n"
			      "sub.f	r2, r2, 1\n"
			      "bnz.d	1b\n"
			      "stb.ab	r8, [r1, 1]\n"
			      :
			      : "r" (addr), "r" (data), "r" (bytelen)
			      : "r8");
	return bytelen;
}

static inline int __raw_readsw(unsigned int addr, void *data, int wordlen)
{
	__asm__ __volatile__ ("1:ld.di	r8, [r0]\n"
			      "sub.f	r2, r2, 1\n"
			      "bnz.d	1b\n"
			      "stw.ab	r8, [r1, 2]\n"
			      :
			      : "r" (addr), "r" (data), "r" (wordlen)
			      : "r8");
	return wordlen;
}

static inline int __raw_readsl(unsigned int addr, void *data, int longlen)
{
	__asm__ __volatile__ ("1:ld.di	r8, [r0]\n"
			      "sub.f	r2, r2, 1\n"
			      "bnz.d	1b\n"
			      "st.ab	r8, [r1, 4]\n"
			      :
			      : "r" (addr), "r" (data), "r" (longlen)
			      : "r8");
	return longlen;
}

static inline int __raw_writesb(unsigned int addr, void *data, int bytelen)
{
	__asm__ __volatile__ ("1:ldb.ab	r8, [r1, 1]\n"
			      "sub.f	r2, r2, 1\n"
			      "bnz.d	1b\n"
			      "st.di	r8, [r0, 0]\n"
			      :
			      : "r" (addr), "r" (data), "r" (bytelen)
			      : "r8");
	return bytelen;
}

static inline int __raw_writesw(unsigned int addr, void *data, int wordlen)
{
	__asm__ __volatile__ ("1:ldw.ab	r8, [r1, 2]\n"
			      "sub.f	r2, r2, 1\n"
			      "bnz.d	1b\n"
			      "st.ab.di	r8, [r0, 0]\n"
			      :
			      : "r" (addr), "r" (data), "r" (wordlen)
			      : "r8");
	return wordlen;
}

static inline int __raw_writesl(unsigned int addr, void *data, int longlen)
{
	__asm__ __volatile__ ("1:ld.ab	r8, [r1, 4]\n"
			      "sub.f	r2, r2, 1\n"
			      "bnz.d	1b\n"
			      "st.ab.di	r8, [r0, 0]\n"
			      :
			      : "r" (addr), "r" (data), "r" (longlen)
			      : "r8");
	return longlen;
}
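
/*
 * Usage sketch (illustrative; FIFO_DATA_REG and rxbuf are hypothetical):
 * drain 64 16-bit samples from a fixed-address device FIFO into memory:
 *
 *	u16 rxbuf[64];
 *	__raw_readsw(FIFO_DATA_REG, rxbuf, 64);
 */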

/*
 * MMIO can also get buffered/optimized in micro-arch, so barriers are needed.
 * Based on the ARM model for the typical use case:
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 * or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 *
 * A sketch of this pattern follows the definitions below.
 */
#define readb(c)	({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })

#define writeb(v,c)	({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)	({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)	({ __iowmb(); writel_relaxed(v,c); })
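
/*
 * Usage sketch of the pattern above (illustrative; 'regs', 'desc' and the
 * bit names are hypothetical):
 *
 *	desc->addr = dma_buf_phys;	(plain store to the DMA descriptor)
 *	writel(CTRL_GO, &regs->ctrl);	(__iowmb() orders the store first)
 *
 *	if (readl(&regs->status) & STAT_DONE)	(__iormb() orders this read ...)
 *		process(dma_buf);		(... before the buffer loads)
 */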

/*
 * Relaxed API for drivers which can handle the barrier ordering themselves
 * (see the batching sketch below).
 *
 * These are also defined to perform little-endian accesses: to provide the
 * typical fixed-endian device register semantics, the byte order is swapped
 * for big-endian builds.
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
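
/*
 * Usage sketch (illustrative; the register names are hypothetical): a driver
 * may batch several relaxed writes and rely on the __iowmb() inside the
 * final writel() to order all of them before the "go" bit:
 *
 *	writel_relaxed(src_phys, &regs->src);
 *	writel_relaxed(dst_phys, &regs->dst);
 *	writel_relaxed(len, &regs->len);
 *	writel(CTRL_GO, &regs->ctrl);
 */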

#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))

#define out_le32(a, v)	out_arch(l, le32, a, v)
#define out_le16(a, v)	out_arch(w, le16, a, v)

#define in_le32(a)	in_arch(l, le32, a)
#define in_le16(a)	in_arch(w, le16, a)

#define out_be32(a, v)	out_arch(l, be32, a, v)
#define out_be16(a, v)	out_arch(w, be16, a, v)

#define in_be32(a)	in_arch(l, be32, a)
#define in_be16(a)	in_arch(w, be16, a)

#define out_8(a, v)	__raw_writeb(v, a)
#define in_8(a)		__raw_readb(a)
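
/*
 * Usage sketch (illustrative; 'regs' is a hypothetical device with
 * big-endian registers): these accessors give fixed-endian semantics
 * regardless of CPU endianness, with no implied barriers:
 *
 *	out_be32(&regs->cfg, CFG_EN);
 *	val = in_be32(&regs->stat);
 */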

/*
 * Clear and set bits in one shot. These macros can be used to clear and
 * set multiple bits in a register using a single call. They can also be
 * used to set a multi-bit pattern using a mask, by specifying the mask in
 * the 'clear' parameter and the new bit pattern in the 'set' parameter.
 * (A usage sketch follows the definitions below.)
 */
#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
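
/*
 * Usage sketch (illustrative; 'regs', DIV_MASK and DIV_SHIFT are
 * hypothetical): update a multi-bit divider field in place with a single
 * read-modify-write:
 *
 *	clrsetbits_le32(&regs->clk, DIV_MASK, 2 << DIV_SHIFT);
 *
 * which reads the register, clears DIV_MASK, ORs in the new value and
 * writes the result back.
 */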

#include <asm-generic/io.h>

#endif	/* __ASM_ARC_IO_H */