/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2006 PathScale, Inc. All Rights Reserved.
 */

#ifndef _LINUX_IO_H
#define _LINUX_IO_H

#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/page.h>

struct device;
struct resource;

#ifndef __iowrite32_copy
void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
#endif

void __ioread32_copy(void *to, const void __iomem *from, size_t count);

#ifndef __iowrite64_copy
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#endif
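
/*
 * Example (illustrative sketch, not part of this header): copying a
 * CPU-side command buffer into a device's MMIO window with
 * __iowrite32_copy().  Note that @count is in 32-bit words, not bytes.
 * The names "ring" and "cmds" below are hypothetical.
 *
 *	u32 cmds[16];
 *	void __iomem *ring;	/- obtained from ioremap()/devm_ioremap() -/
 *
 *	__iowrite32_copy(ring, cmds, ARRAY_SIZE(cmds));
 */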

#ifdef CONFIG_MMU
int ioremap_page_range(unsigned long addr, unsigned long end,
		       phys_addr_t phys_addr, pgprot_t prot);
int vmap_page_range(unsigned long addr, unsigned long end,
		    phys_addr_t phys_addr, pgprot_t prot);
#else
static inline int ioremap_page_range(unsigned long addr, unsigned long end,
				     phys_addr_t phys_addr, pgprot_t prot)
{
	return 0;
}
static inline int vmap_page_range(unsigned long addr, unsigned long end,
				  phys_addr_t phys_addr, pgprot_t prot)
{
	return 0;
}
#endif
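
/*
 * Example (illustrative sketch): ioremap_page_range() is the low-level
 * helper that arch ioremap() implementations typically build on.  A
 * simplified mapping sequence, assuming a page-aligned phys_addr and size,
 * might look like this ("area" and "vaddr" are hypothetical locals):
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	unsigned long vaddr;
 *
 *	if (!area)
 *		return NULL;
 *	vaddr = (unsigned long)area->addr;
 *	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
 *			       pgprot_noncached(PAGE_KERNEL))) {
 *		free_vm_area(area);
 *		return NULL;
 *	}
 *	return (void __iomem *)vaddr;
 */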

/*
 * Managed iomap interface
 */
#ifdef CONFIG_HAS_IOPORT_MAP
void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr);
void devm_ioport_unmap(struct device *dev, void __iomem *addr);
#else
static inline void __iomem *devm_ioport_map(struct device *dev,
					    unsigned long port,
					    unsigned int nr)
{
	return NULL;
}

static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
}
#endif
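
/*
 * Example (illustrative sketch): mapping a legacy I/O port range from a
 * driver's probe routine.  Because the mapping is device-managed, no
 * explicit devm_ioport_unmap() is needed on the error or remove paths.
 * "foo_probe", "FOO_IO_BASE" and "FOO_IO_LEN" are hypothetical.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		void __iomem *regs;
 *
 *		regs = devm_ioport_map(dev, FOO_IO_BASE, FOO_IO_LEN);
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		return ioread8(regs) == 0xff ? -ENODEV : 0;
 *	}
 */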

#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)

void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size);
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size);
void devm_iounmap(struct device *dev, void __iomem *addr);
int check_signature(const volatile void __iomem *io_addr,
		    const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);

void *devm_memremap(struct device *dev, resource_size_t offset,
		    size_t size, unsigned long flags);
void devm_memunmap(struct device *dev, void *addr);
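
/*
 * Example (illustrative sketch): mapping a platform device's register
 * window with the managed API.  "foo_probe" is hypothetical; the mapping
 * is released automatically when the device is unbound.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *base;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -EINVAL;
 *
 *		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 *		if (!base)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 */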

/* architectures can override this */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size, pgprot_t prot);


#ifdef CONFIG_PCI
/*
 * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
 * Posting") mandate non-posted configuration transactions. This default
 * implementation attempts to use the ioremap_np() API to provide this
 * on arches that support it, and falls back to ioremap() on those that
 * don't. Overriding this function is deprecated; arches that properly
 * support non-posted accesses should implement ioremap_np() instead, which
 * this default implementation can then use to return mappings compliant with
 * the PCI specification.
 */
#ifndef pci_remap_cfgspace
#define pci_remap_cfgspace pci_remap_cfgspace
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
					       size_t size)
{
	return ioremap_np(offset, size) ?: ioremap(offset, size);
}
#endif
#endif
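
/*
 * Example (illustrative sketch): a PCI host controller driver mapping its
 * configuration space aperture.  "cfg_res" is assumed to describe the
 * config window; on arches that provide ioremap_np() the resulting mapping
 * is non-posted, as the PCI spec requires.
 *
 *	void __iomem *cfg;
 *
 *	cfg = pci_remap_cfgspace(cfg_res->start, resource_size(cfg_res));
 *	if (!cfg)
 *		return -ENOMEM;
 */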

/*
 * Some systems do not have legacy ISA devices.
 * /dev/port is not a valid interface on these systems.
 * So for those archs, <asm/io.h> should define the following symbol.
 */
#ifndef arch_has_dev_port
#define arch_has_dev_port()	(1)
#endif
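
/*
 * Example (illustrative sketch): an architecture without legacy ISA port
 * I/O would declare /dev/port unavailable from its <asm/io.h>:
 *
 *	#define arch_has_dev_port()	(0)
 */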

/*
 * Some systems (x86 without PAT) have a somewhat reliable way to mark a
 * physical address range such that uncached mappings will actually
 * end up write-combining. This facility should be used in conjunction
 * with pgprot_writecombine, ioremap_wc, or set_memory_wc, since it has
 * no effect if the per-page mechanisms are functional.
 * (On x86 without PAT, these functions manipulate MTRRs.)
 *
 * arch_phys_wc_del(0) or arch_phys_wc_del(any error code) is guaranteed
 * to have no effect.
 */
#ifndef arch_phys_wc_add
static inline int __must_check arch_phys_wc_add(unsigned long base,
						unsigned long size)
{
	return 0;  /* It worked (i.e. did nothing). */
}

static inline void arch_phys_wc_del(int handle)
{
}

#define arch_phys_wc_add arch_phys_wc_add
#ifndef arch_phys_wc_index
static inline int arch_phys_wc_index(int handle)
{
	return -1;
}
#define arch_phys_wc_index arch_phys_wc_index
#endif
#endif
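
/*
 * Example (illustrative sketch): a framebuffer-style driver covering its
 * aperture with write-combining in addition to mapping it with
 * ioremap_wc().  The "wc_cookie" field name is hypothetical; the handle
 * returned by arch_phys_wc_add() is later passed to arch_phys_wc_del(),
 * which safely ignores zero and error values.
 *
 *	info->wc_cookie = arch_phys_wc_add(fb_phys, fb_size);
 *	info->screen_base = ioremap_wc(fb_phys, fb_size);
 *	...
 *	iounmap(info->screen_base);
 *	arch_phys_wc_del(info->wc_cookie);
 */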

int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size);

enum {
	/* See memremap() kernel-doc for usage description... */
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
	MEMREMAP_ENC = 1 << 3,
	MEMREMAP_DEC = 1 << 4,
};

void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
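
/*
 * Example (illustrative sketch): getting a regular (non-__iomem) kernel
 * mapping of a RAM-like range, e.g. a firmware-reserved buffer, with a
 * cacheable write-back mapping.  "buf_phys", "buf_size", "dst" and "len"
 * are hypothetical.
 *
 *	void *buf = memremap(buf_phys, buf_size, MEMREMAP_WB);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(dst, buf, len);
 *	memunmap(buf);
 */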

/*
 * On x86 PAT systems we have memory tracking that keeps track of
 * the allowed mappings on memory ranges. This tracking works for
 * all the in-kernel mapping APIs (ioremap*), but where the user
 * wishes to map a range from a physical device into user memory
 * the tracking won't be updated. This API is to be used by
 * drivers which remap physical device pages into userspace,
 * and want to make sure they are mapped WC and not UC.
 */
#ifndef arch_io_reserve_memtype_wc
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
					     resource_size_t size)
{
	return 0;
}

static inline void arch_io_free_memtype_wc(resource_size_t base,
					   resource_size_t size)
{
}
#endif

int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
				    resource_size_t size);
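
/*
 * Example (illustrative sketch): a GPU-style driver reserving its VRAM
 * aperture as write-combining before letting userspace mmap() it, and
 * releasing the reservation on teardown.  "vram_base" and "vram_size"
 * are hypothetical.
 *
 *	int ret = arch_io_reserve_memtype_wc(vram_base, vram_size);
 *
 *	if (ret)
 *		return ret;
 *	...
 *	arch_io_free_memtype_wc(vram_base, vram_size);
 *
 * Alternatively, devm_arch_io_reserve_memtype_wc() drops the reservation
 * automatically when the device is unbound.
 */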

#endif /* _LINUX_IO_H */