io.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017 Andes Technology Corporation
 * Rick Chen, Andes Technology Corporation <rick@andestech.com>
 */

#ifndef __ASM_RISCV_IO_H
#define __ASM_RISCV_IO_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <asm/byteorder.h>

static inline void sync(void)
{
}
/*
 * Given a physical address and a length, return a virtual address
 * that can be used to access the memory range with the caching
 * properties specified by "flags".
 */
#define MAP_NOCACHE	(0)
#define MAP_WRCOMBINE	(0)
#define MAP_WRBACK	(0)
#define MAP_WRTHROUGH	(0)

#ifdef CONFIG_ARCH_MAP_SYSMEM
static inline void *map_sysmem(phys_addr_t paddr, unsigned long len)
{
	if (paddr < PHYS_SDRAM_0_SIZE + PHYS_SDRAM_1_SIZE)
		paddr = paddr | 0x40000000;
	return (void *)(uintptr_t)paddr;
}

static inline void *unmap_sysmem(const void *vaddr)
{
	phys_addr_t paddr = (phys_addr_t)vaddr;

	paddr = paddr & ~0x40000000;
	return (void *)(uintptr_t)paddr;
}

static inline phys_addr_t map_to_sysmem(const void *ptr)
{
	return (phys_addr_t)(uintptr_t)ptr;
}
#endif

static inline void *
map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
{
	return (void *)paddr;
}

/*
 * Take down a mapping set up by map_physmem().
 */
static inline void unmap_physmem(void *vaddr, unsigned long flags)
{
}

static inline phys_addr_t virt_to_phys(void *vaddr)
{
	return (phys_addr_t)(vaddr);
}
/*
 * Generic virtual read/write accessors. Byte, half-word, word and
 * double-word variants are all defined here.
 */
#define __arch_getb(a)		(*(unsigned char *)(a))
#define __arch_getw(a)		(*(unsigned short *)(a))
#define __arch_getl(a)		(*(unsigned int *)(a))
#define __arch_getq(a)		(*(unsigned long *)(a))

#define __arch_putb(v, a)	(*(unsigned char *)(a) = (v))
#define __arch_putw(v, a)	(*(unsigned short *)(a) = (v))
#define __arch_putl(v, a)	(*(unsigned int *)(a) = (v))
#define __arch_putq(v, a)	(*(unsigned long *)(a) = (v))

#define __raw_writeb(v, a)	__arch_putb(v, a)
#define __raw_writew(v, a)	__arch_putw(v, a)
#define __raw_writel(v, a)	__arch_putl(v, a)
#define __raw_writeq(v, a)	__arch_putq(v, a)

#define __raw_readb(a)		__arch_getb(a)
#define __raw_readw(a)		__arch_getw(a)
#define __raw_readl(a)		__arch_getl(a)
#define __raw_readq(a)		__arch_getq(a)

/*
 * TODO: The kernel offers some more advanced versions of barriers, it might
 * have some advantages to use them instead of the simple one here.
 */
#define dmb()		__asm__ __volatile__ ("" : : : "memory")
#define __iormb()	dmb()
#define __iowmb()	dmb()

static inline void writeb(u8 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putb(val, addr);
}

static inline void writew(u16 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putw(val, addr);
}

static inline void writel(u32 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putl(val, addr);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	__iowmb();
	__arch_putq(val, addr);
}

static inline u8 readb(const volatile void __iomem *addr)
{
	u8	val;

	val = __arch_getb(addr);
	__iormb();
	return val;
}

static inline u16 readw(const volatile void __iomem *addr)
{
	u16	val;

	val = __arch_getw(addr);
	__iormb();
	return val;
}

static inline u32 readl(const volatile void __iomem *addr)
{
	u32	val;

	val = __arch_getl(addr);
	__iormb();
	return val;
}
static inline u64 readq(const volatile void __iomem *addr)
{
	u64	val;

	val = __arch_getq(addr);
	__iormb();
	return val;
}
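
/*
 * Example (illustrative sketch): reading a status register and updating a
 * control register through the accessors above. The register block and
 * offsets are hypothetical, not defined by this header.
 *
 *	void __iomem *regs = (void __iomem *)0xf0100000;
 *	u32 status;
 *
 *	status = readl(regs + 0x04);		// load, then read barrier
 *	writel(status | 0x1, regs + 0x00);	// write barrier, then store
 */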
/*
 * The compiler seems to be incapable of optimising constants
 * properly.  Spell it out to the compiler in some cases.
 * These are only valid for small values of "off" (< 1<<12)
 */
#define __raw_base_writeb(val, base, off)	__arch_base_putb(val, base, off)
#define __raw_base_writew(val, base, off)	__arch_base_putw(val, base, off)
#define __raw_base_writel(val, base, off)	__arch_base_putl(val, base, off)

#define __raw_base_readb(base, off)	__arch_base_getb(base, off)
#define __raw_base_readw(base, off)	__arch_base_getw(base, off)
#define __raw_base_readl(base, off)	__arch_base_getl(base, off)

#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))

#define out_le32(a, v)	out_arch(l, le32, a, v)
#define out_le16(a, v)	out_arch(w, le16, a, v)

#define in_le32(a)	in_arch(l, le32, a)
#define in_le16(a)	in_arch(w, le16, a)

#define out_be32(a, v)	out_arch(l, be32, a, v)
#define out_be16(a, v)	out_arch(w, be16, a, v)

#define in_be32(a)	in_arch(l, be32, a)
#define in_be16(a)	in_arch(w, be16, a)

#define out_8(a, v)	__raw_writeb(v, a)
#define in_8(a)		__raw_readb(a)
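
/*
 * Example (illustrative sketch): the in_/out_ helpers handle byte swapping
 * for a register of fixed endianness, independent of CPU endianness. The
 * register address below is hypothetical.
 *
 *	u32 *desc_reg = (u32 *)0xf0200000;	// big-endian device register
 *	u32 v;
 *
 *	v = in_be32(desc_reg);			// swapped to CPU order if needed
 *	out_be32(desc_reg, v | 0x80000000);
 */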
/*
 * Clear and set bits in one shot. These macros can be used to clear and
 * set multiple bits in a register using a single call. These macros can
 * also be used to set a multiple-bit bit pattern using a mask, by
 * specifying the mask in the 'clear' parameter and the new bit pattern
 * in the 'set' parameter.
 */
#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear)		clrbits(be32, addr, clear)
#define setbits_be32(addr, set)			setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set)	clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear)		clrbits(le32, addr, clear)
#define setbits_le32(addr, set)			setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set)	clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear)		clrbits(be16, addr, clear)
#define setbits_be16(addr, set)			setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set)	clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear)		clrbits(le16, addr, clear)
#define setbits_le16(addr, set)			setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set)	clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear)			clrbits(8, addr, clear)
#define setbits_8(addr, set)			setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set)		clrsetbits(8, addr, clear, set)
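
/*
 * Example (illustrative sketch): updating a multi-bit field in one
 * read-modify-write with clrsetbits_le32(). The register, mask and field
 * value are hypothetical.
 *
 *	#define CLK_DIV_MASK	0x000000f0
 *	#define CLK_DIV(x)	((x) << 4)
 *
 *	u32 *clk_reg = (u32 *)0xf0300000;	// little-endian clock register
 *
 *	// clear the divider field, then set it to 3
 *	clrsetbits_le32(clk_reg, CLK_DIV_MASK, CLK_DIV(3));
 */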
/*
 * Now, pick up the machine-defined IO definitions
 * #include <asm/arch/io.h>
 */
/*
 * IO port access primitives
 * -------------------------
 *
 * RISC-V, like ARM, has no special IO access instructions; all IO is
 * memory mapped.
 * Note that these are defined to perform little endian accesses
 * only.  Their primary purpose is to access PCI and ISA peripherals.
 *
 * Note that for a big endian machine, this implies that the following
 * big endian mode connectivity is in place, as described by numerous
 * ARM documents:
 *
 *    PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *    ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * The machine specific io.h include defines __io to translate an "IO"
 * address to a memory address.
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.  Note that
 * __raw_* do not guarantee this behaviour.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
 */
#ifdef __io
#define outb(v, p)	__raw_writeb(v, __io(p))
#define outw(v, p)	__raw_writew(cpu_to_le16(v), __io(p))
#define outl(v, p)	__raw_writel(cpu_to_le32(v), __io(p))

#define inb(p)	({ unsigned int __v = __raw_readb(__io(p)); __v; })
#define inw(p)	({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; })
#define inl(p)	({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; })

#define outsb(p, d, l)	writesb(__io(p), d, l)
#define outsw(p, d, l)	writesw(__io(p), d, l)
#define outsl(p, d, l)	writesl(__io(p), d, l)

#define insb(p, d, l)	readsb(__io(p), d, l)
#define insw(p, d, l)	readsw(__io(p), d, l)
#define insl(p, d, l)	readsl(__io(p), d, l)
/*
 * String IO helpers. The IO-side pointer is deliberately not advanced:
 * every element is transferred through the same (FIFO-style) register,
 * while the memory-side pointer walks through the buffer.
 */
static inline void readsb(unsigned int *addr, void *data, int bytelen)
{
	unsigned char *ptr;
	unsigned char *ptr2;

	ptr = (unsigned char *)addr;
	ptr2 = (unsigned char *)data;

	while (bytelen) {
		*ptr2 = *ptr;
		ptr2++;
		bytelen--;
	}
}

static inline void readsw(unsigned int *addr, void *data, int wordlen)
{
	unsigned short *ptr;
	unsigned short *ptr2;

	ptr = (unsigned short *)addr;
	ptr2 = (unsigned short *)data;

	while (wordlen) {
		*ptr2 = *ptr;
		ptr2++;
		wordlen--;
	}
}

static inline void readsl(unsigned int *addr, void *data, int longlen)
{
	unsigned int *ptr;
	unsigned int *ptr2;

	ptr = (unsigned int *)addr;
	ptr2 = (unsigned int *)data;

	while (longlen) {
		*ptr2 = *ptr;
		ptr2++;
		longlen--;
	}
}

static inline void writesb(unsigned int *addr, const void *data, int bytelen)
{
	unsigned char *ptr;
	unsigned char *ptr2;

	ptr = (unsigned char *)addr;
	ptr2 = (unsigned char *)data;

	while (bytelen) {
		*ptr = *ptr2;
		ptr2++;
		bytelen--;
	}
}

static inline void writesw(unsigned int *addr, const void *data, int wordlen)
{
	unsigned short *ptr;
	unsigned short *ptr2;

	ptr = (unsigned short *)addr;
	ptr2 = (unsigned short *)data;

	while (wordlen) {
		*ptr = *ptr2;
		ptr2++;
		wordlen--;
	}
}

static inline void writesl(unsigned int *addr, const void *data, int longlen)
{
	unsigned int *ptr;
	unsigned int *ptr2;

	ptr = (unsigned int *)addr;
	ptr2 = (unsigned int *)data;

	while (longlen) {
		*ptr = *ptr2;
		ptr2++;
		longlen--;
	}
}
#endif	/* __io */
#define outb_p(val, port)		outb((val), (port))
#define outw_p(val, port)		outw((val), (port))
#define outl_p(val, port)		outl((val), (port))
#define inb_p(port)			inb((port))
#define inw_p(port)			inw((port))
#define inl_p(port)			inl((port))

#define outsb_p(port, from, len)	outsb(port, from, len)
#define outsw_p(port, from, len)	outsw(port, from, len)
#define outsl_p(port, from, len)	outsl(port, from, len)
#define insb_p(port, to, len)		insb(port, to, len)
#define insw_p(port, to, len)		insw(port, to, len)
#define insl_p(port, to, len)		insl(port, to, len)
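
/*
 * Example (illustrative sketch): the port accessors above are usable only
 * when the machine-specific io.h defines __io. The port numbers below are
 * hypothetical.
 *
 *	u16 buf[256];
 *
 *	outb(0x20, 0x1f6);		// write a device/control register
 *	insw(0x1f0, buf, 256);		// drain 256 words from the data FIFO
 */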
/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unwrite-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version.  The PCI specific version
 * is in pci.h
 */

/*
 * String version of IO memory access ops:
 */

/*
 * If this architecture has PCI memory IO, then define the read/write
 * macros.  These should only be used with the cookie passed from
 * ioremap.
 */
#ifdef __mem_pci
#define readb(c)	({ unsigned int __v = \
				__raw_readb(__mem_pci(c)); __v; })
#define readw(c)	({ unsigned int __v = \
				le16_to_cpu(__raw_readw(__mem_pci(c))); __v; })
#define readl(c)	({ unsigned int __v = \
				le32_to_cpu(__raw_readl(__mem_pci(c))); __v; })

#define writeb(v, c)	__raw_writeb(v, __mem_pci(c))
#define writew(v, c)	__raw_writew(cpu_to_le16(v), __mem_pci(c))
#define writel(v, c)	__raw_writel(cpu_to_le32(v), __mem_pci(c))

#define memset_io(c, v, l)	_memset_io(__mem_pci(c), (v), (l))
#define memcpy_fromio(a, c, l)	_memcpy_fromio((a), __mem_pci(c), (l))
#define memcpy_toio(c, a, l)	_memcpy_toio(__mem_pci(c), (a), (l))

#define eth_io_copy_and_sum(s, c, l, b) \
	eth_copy_and_sum((s), __mem_pci(c), (l), (b))
static inline int check_signature(ulong io_addr, const uchar *s, int len)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *s)
			goto out;
		io_addr++;
		s++;
		len--;
	} while (len);
	retval = 1;
out:
	return retval;
}
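
/*
 * Example (illustrative sketch): probing for a device by matching a ROM
 * signature with check_signature(). The address and signature bytes are
 * hypothetical.
 *
 *	static const uchar sig[] = { 0x55, 0xaa };
 *
 *	if (check_signature(0xc0000, sig, sizeof(sig)))
 *		printf("option ROM found\n");
 */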
#endif	/* __mem_pci */

/*
 * If this architecture has ISA IO, then define the isa_read/isa_write
 * macros.
 */
#ifdef __mem_isa
#define isa_readb(addr)			__raw_readb(__mem_isa(addr))
#define isa_readw(addr)			__raw_readw(__mem_isa(addr))
#define isa_readl(addr)			__raw_readl(__mem_isa(addr))
#define isa_writeb(val, addr)		__raw_writeb(val, __mem_isa(addr))
#define isa_writew(val, addr)		__raw_writew(val, __mem_isa(addr))
#define isa_writel(val, addr)		__raw_writel(val, __mem_isa(addr))
#define isa_memset_io(a, b, c)		_memset_io(__mem_isa(a), (b), (c))
#define isa_memcpy_fromio(a, b, c)	_memcpy_fromio((a), __mem_isa(b), (c))
#define isa_memcpy_toio(a, b, c)	_memcpy_toio(__mem_isa((a)), (b), (c))

#define isa_eth_io_copy_and_sum(a, b, c, d) \
	eth_copy_and_sum((a), __mem_isa(b), (c), (d))

static inline int
isa_check_signature(ulong io_addr, const uchar *s, int len)
{
	int retval = 0;

	do {
		if (isa_readb(io_addr) != *s)
			goto out;
		io_addr++;
		s++;
		len--;
	} while (len);
	retval = 1;
out:
	return retval;
}

#else	/* __mem_isa */

#define isa_readb(addr)			(__readwrite_bug("isa_readb"), 0)
#define isa_readw(addr)			(__readwrite_bug("isa_readw"), 0)
#define isa_readl(addr)			(__readwrite_bug("isa_readl"), 0)
#define isa_writeb(val, addr)		__readwrite_bug("isa_writeb")
#define isa_writew(val, addr)		__readwrite_bug("isa_writew")
#define isa_writel(val, addr)		__readwrite_bug("isa_writel")
#define isa_memset_io(a, b, c)		__readwrite_bug("isa_memset_io")
#define isa_memcpy_fromio(a, b, c)	__readwrite_bug("isa_memcpy_fromio")
#define isa_memcpy_toio(a, b, c)	__readwrite_bug("isa_memcpy_toio")

#define isa_eth_io_copy_and_sum(a, b, c, d) \
	__readwrite_bug("isa_eth_io_copy_and_sum")

#define isa_check_signature(io, sig, len)	(0)

#endif	/* __mem_isa */
#endif	/* __KERNEL__ */
#endif	/* __ASM_RISCV_IO_H */