#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *	read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *	while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 *
 * Historically, we have also had ctrl_in{b,w,l,q}/ctrl_out{b,w,l,q} for
 * SuperH-specific I/O (raw I/O to on-chip CPU peripherals). In practice
 * these have the same semantics as the __raw variants, and as such, all
 * new code should be using the __raw versions.
 */
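/*
 * Illustrative difference between the two families (the offset here is
 * a hypothetical register, not part of this API):
 *
 *	u32 v = readl(base + 0x04);	  -- LE -> CPU swap, plus barrier
 *	u32 w = __raw_readl(base + 0x04); -- native-endian load, no barrier
 */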
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>
#ifdef __KERNEL__
#define __IO_PREFIX	generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
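/*
 * The __raw_{read,write}{b,w,l,q}() accessors below are plain volatile
 * loads and stores in native byte order, with no barriers or byte
 * swapping. __chk_io_ptr() is only there so sparse can verify that an
 * __iomem-annotated pointer is passed; it generates no code.
 */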
#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))
#define readb_relaxed(c)	({ u8  __v = __raw_readb(c); __v; })
#define readw_relaxed(c)	({ u16 __v = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = le64_to_cpu((__force __le64) \
					__raw_readq(c)); __v; })
#define writeb_relaxed(v,c)	((void)__raw_writeb(v,c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16) \
					cpu_to_le16(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
					cpu_to_le32(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64) \
					cpu_to_le64(v),c))
#define readb(a)	({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)	({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)	({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)	({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)	({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)	({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)	({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)	({ wmb(); writeq_relaxed((v),(a)); })
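/*
 * Note the asymmetry in the barriered accessors above: a read is the
 * relaxed (byte-swapping) load followed by rmb(), while a write is
 * wmb() followed by the relaxed store, so the MMIO access never drifts
 * past its barrier in the direction that matters.
 */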
#define readsb(p,d,l)	__raw_readsb(p,d,l)
#define readsw(p,d,l)	__raw_readsw(p,d,l)
#define readsl(p,d,l)	__raw_readsl(p,d,l)

#define writesb(p,d,l)	__raw_writesb(p,d,l)
#define writesw(p,d,l)	__raw_writesw(p,d,l)
#define writesl(p,d,l)	__raw_writesl(p,d,l)
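/*
 * The string forms above transfer 'count' naturally-sized items between
 * a memory buffer and a single MMIO address (typically a FIFO register),
 * with no byte swapping and no barriers.
 */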
#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}
__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
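/*
 * The *_uncached() accessors built above perform a single access from
 * within a jump_to_uncached()/back_to_cached() window, i.e. while
 * executing out of the uncached mapping. This matters when touching
 * registers that modify the behaviour of the cache itself.
 */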
#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)
#ifdef CONFIG_HAS_IOPORT

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH, I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern const unsigned long sh_io_port_base;
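/*
 * sh_io_port_base is declared const so the compiler is free to cache it
 * across accesses; __set_io_port_base() below deliberately casts the
 * const away for the single assignment made at setup time.
 */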
static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}
#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map	ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}
#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)
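/*
 * The instantiations above expand to out{b,w,l,q}()/in{b,w,l,q}() plus
 * the pausing out*_p()/in*_p() variants, which issue SLOW_DOWN_IO after
 * the access. All of them go through __ioport_map() rather than any
 * dedicated port instruction, per the memory-mapped port comment above.
 */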
#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}
__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
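/*
 * Typical use, e.g. draining a 512-byte sector through a 16-bit data
 * port (the port name is purely illustrative):
 *
 *	insw(data_port, buf, 512 / 2);	-- read 256 u16s from one port
 *	outsw(data_port, buf, 512 / 2);	-- write them back out
 */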
#endif /* CONFIG_HAS_IOPORT */

/*
 * Legacy SuperH on-chip I/O functions
 *
 * These are all deprecated; all new (and especially cross-platform)
 * code should be using the __raw_xxx() routines directly.
 */
static inline u8 __deprecated ctrl_inb(unsigned long addr)
{
	return __raw_readb(addr);
}

static inline u16 __deprecated ctrl_inw(unsigned long addr)
{
	return __raw_readw(addr);
}

static inline u32 __deprecated ctrl_inl(unsigned long addr)
{
	return __raw_readl(addr);
}

static inline u64 __deprecated ctrl_inq(unsigned long addr)
{
	return __raw_readq(addr);
}

static inline void __deprecated ctrl_outb(u8 v, unsigned long addr)
{
	__raw_writeb(v, addr);
}

static inline void __deprecated ctrl_outw(u16 v, unsigned long addr)
{
	__raw_writew(v, addr);
}

static inline void __deprecated ctrl_outl(u32 v, unsigned long addr)
{
	__raw_writel(v, addr);
}

static inline void __deprecated ctrl_outq(u64 v, unsigned long addr)
{
	__raw_writeq(v, addr);
}
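/*
 * Converting legacy callers is mechanical; for example (the register
 * name is purely illustrative):
 *
 *	ctrl_outw(mask, INTC_REG);	-- deprecated
 *	__raw_writew(mask, INTC_REG);	-- preferred replacement
 */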
#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb()	wmb()

/* We really want these to end up using memcpy() etc. */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask... */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);
#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif
/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncacheable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}
static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached accesses for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}
static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
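/*
 * Note the fallback order in __ioremap_mode(): trapped I/O windows
 * first, then the 29-bit segment fast path, and only then a real
 * page-table based mapping via __ioremap().
 */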
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}
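/*
 * Typical driver pattern (illustrative only; the resource and register
 * offset are assumptions, not part of this header):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);		-- hypothetical enable register
 *	...
 *	iounmap(regs);
 */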
#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif
#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif
#define ioremap_nocache	ioremap
#define iounmap		__iounmap
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */