Lines Matching +full:non +full:- +full:volatile

/* SPDX-License-Identifier: GPL-2.0-only */
 * Copyright (C) 1996-2000 Russell King
 * 16-Sep-1996 RMK	Inlined the inx/outx functions & optimised for both
 *			constant addresses and variable addresses.
 * 04-Dec-1997 RMK	Moved a lot of this stuff to the new architecture
 *			specific IO header files.
 * 27-Mar-1999 PJB	Second parameter of memcpy_toio is const.
 * 04-Apr-1999 PJB	Added check_signature.
 * 12-Dec-1999 RMK	More cleanups
 * 18-Jun-2000 RMK	Removed virt_to_* and friends definitions
 * 05-Oct-2004 BJD	Moved memory string functions to use void __iomem
#include <asm-generic/pci_iomap.h>
 * Atomic MMIO-wide IO modify
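/*
 * In the kernel source this comment precedes the declarations of
 * atomic_io_modify() and atomic_io_modify_relaxed(), which perform a
 * locked read-modify-write of an MMIO register:
 * reg = (reg & ~mask) | (set & mask).  Below is a minimal usage sketch;
 * the register pointer and bit values are hypothetical, not from this
 * header.
 */
static void example_set_divider(void __iomem *clk_ctrl)
{
	/* clear bits [3:0], then set them to 5, without racing other CPUs */
	atomic_io_modify(clk_ctrl, 0xf, 0x5);
}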
 * Generic IO read/write.  These perform native-endian accesses.  Note
 * that some architectures will want to re-define __raw_{read,write}w.
void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);

void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
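/*
 * Usage sketch (hypothetical driver code, not from this header): the
 * string accessors above repeatedly access the *same* register address,
 * which is the usual way to drain or fill a device FIFO.
 */
static void fifo_read_samples(const volatile void __iomem *fifo_reg,
			      u16 *buf, int nr_samples)
{
	/* nr_samples back-to-back 16-bit reads from one MMIO address */
	__raw_readsw(fifo_reg, buf, nr_samples);
}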
 * Half-word accesses are problematic with RiscPC due to limitations of
 * the bus. Rather than special-case the machine, just let the compiler
 * generate the access for CPUs prior to ARMv6.
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
#define __raw_writew(v,a)	((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("strh %1, %0"
		     : : "Q" (*(volatile u16 __force *)addr), "r" (val));
}

static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;
	asm volatile("ldrh %0, %1"
		     : "=r" (val)
		     : "Q" (*(volatile u16 __force *)addr));
	return val;
}

static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("strb %1, %0"
		     : : "Qo" (*(volatile u8 __force *)addr), "r" (val));
}

static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("str %1, %0"
		     : : "Qo" (*(volatile u32 __force *)addr), "r" (val));
}

static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;
	asm volatile("ldrb %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u8 __force *)addr));
	return val;
}

static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;
	asm volatile("ldr %0, %1"
		     : "=r" (val)
		     : "Qo" (*(volatile u32 __force *)addr));
	return val;
}
 * The _caller variety takes a __builtin_return_address(0) value for
 * /proc/vmalloc to use - and should only be used in non-inline functions.
 * Now, pick up the machine-defined IO definitions
 * IO port access primitives
 * -------------------------
 *
 * On big-endian machines the PCI and ARM data byte lanes are wired as:
 *
 *	PCI:  D0-D7   D8-D15 D16-D23 D24-D31
 *	ARM: D24-D31 D16-D23  D8-D15  D0-D7
 *
 * Note that we prevent GCC re-ordering or caching values in expressions
 * by introducing sequence points into the in*() definitions.
 *
 * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
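/*
 * Usage sketch (hypothetical device): on ARM the "port number" given to
 * the in/out macros is translated by the machine's __io() definition into
 * an MMIO address inside the PCI/ISA IO window, and the access itself is
 * little-endian regardless of CPU endianness.
 */
static u8 example_read_status(unsigned long io_port)
{
	outb(0x80, io_port);		/* select a hypothetical index */
	return inb(io_port + 1);	/* read the corresponding data byte */
}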
extern void _memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void _memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void _memset_io(volatile void __iomem *, int, size_t);
 * Memory access primitives
 * ------------------------
static inline void memset_io(volatile void __iomem *dst, unsigned c,
	size_t count)
static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
	size_t count)
static inline void memcpy_toio(volatile void __iomem *to, const void *from,
	size_t count)
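/*
 * Usage sketch (hypothetical): bulk-move data between a kernel buffer and
 * an ioremap()ed device window.  Plain memcpy()/memset() must not be used
 * on __iomem pointers; these helpers keep the accesses MMIO-safe.
 */
static void example_fetch_frame(void *dst, const void __iomem *rx_window,
				size_t len)
{
	memcpy_fromio(dst, rx_window, len);
}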
 * Function		Memory type	Cacheability	Cache hint
 * ioremap()		Device		n/a		n/a
 * ioremap_cache()	Normal		Writeback	Read allocate
 * ioremap_wc()		Normal		Non-cacheable	n/a
 * ioremap_wt()		Normal		Non-cacheable	n/a
 *
 * All device mappings have the following properties:
 * - no access speculation
 * - no repetition (eg, on return from an exception)
 * - number, order and size of accesses are maintained
 * - unaligned accesses are "unpredictable"
 * - writes may be delayed before they hit the endpoint device
 *
 * All normal memory mappings have the following properties:
 * - reads can be repeated with no side effects
 * - repeated reads return the last value written
 * - reads can fetch additional locations without side effects
 * - writes can be repeated (in certain cases) with no side effects
 * - writes can be merged before accessing the target
 * - unaligned accesses can be supported
 * - ordering is not guaranteed without explicit dependencies or barrier
 *   instructions
 * - writes may be delayed before they hit the endpoint memory
void iounmap(volatile void __iomem *io_addr);
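/*
 * Usage sketch (hypothetical addresses and sizes): pick the mapping type
 * to match the table above - ioremap() for registers with side effects,
 * ioremap_wc() for things like framebuffers where merged or repeated
 * writes are harmless and faster.
 */
static int example_map(void)
{
	void __iomem *regs = ioremap(0x40000000, 0x1000);	/* device type */
	void __iomem *fb = ioremap_wc(0x60000000, 0x100000);	/* normal, non-cacheable */

	if (!regs || !fb) {
		if (regs)
			iounmap(regs);
		if (fb)
			iounmap(fb);
		return -1;
	}

	/* ... readl()/writel() on regs; memcpy_toio() into fb ... */

	iounmap(fb);
	iounmap(regs);
	return 0;
}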
#include <asm-generic/io.h>