// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 * Author: John Garry <john.garry@huawei.com>
 */

#define pr_fmt(fmt)	"LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* The unique hardware address list */
static LIST_HEAD(io_range_list);
static DEFINE_MUTEX(io_range_mutex);

/**
 * logic_pio_register_range - register logical PIO range for a host
 * @new_range: pointer to the IO range to be registered.
 *
 * Returns 0 on success, or a negative error code on failure.
 * If the range is already registered, -EEXIST is returned, which callers
 * should treat as success.
 *
 * Register a new IO range node in the IO range list.
 */
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
	struct logic_pio_hwaddr *range;
	resource_size_t start;
	resource_size_t end;
	resource_size_t mmio_end = 0;
	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
	int ret = 0;

	if (!new_range || !new_range->fwnode || !new_range->size ||
	    (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
		return -EINVAL;

	start = new_range->hw_start;
	end = new_range->hw_start + new_range->size;

	mutex_lock(&io_range_mutex);
	list_for_each_entry(range, &io_range_list, list) {
		if (range->fwnode == new_range->fwnode) {
			/* range already there */
			ret = -EEXIST;
			goto end_register;
		}
		if (range->flags == LOGIC_PIO_CPU_MMIO &&
		    new_range->flags == LOGIC_PIO_CPU_MMIO) {
			/* for MMIO ranges we need to check for overlap */
			if (start >= range->hw_start + range->size ||
			    end < range->hw_start) {
				mmio_end = range->io_start + range->size;
			} else {
				ret = -EFAULT;
				goto end_register;
			}
		} else if (range->flags == LOGIC_PIO_INDIRECT &&
			   new_range->flags == LOGIC_PIO_INDIRECT) {
			iio_sz += range->size;
		}
	}

	/* range not registered yet, check for available space */
	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
		if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
			/* if it's too big check if 64K space can be reserved */
			if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
				ret = -E2BIG;
				goto end_register;
			}
			new_range->size = SZ_64K;
			pr_warn("Requested IO range too big, new size set to 64K\n");
		}
		new_range->io_start = mmio_end;
	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
			ret = -E2BIG;
			goto end_register;
		}
		new_range->io_start = iio_sz;
	} else {
		/* invalid flag */
		ret = -EINVAL;
		goto end_register;
	}

	list_add_tail_rcu(&new_range->list, &io_range_list);

end_register:
	mutex_unlock(&io_range_mutex);
	return ret;
}

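/*
 * Example (illustrative sketch, not taken from an in-tree driver): a host
 * controller driver providing indirect (accessor-based) port I/O would
 * typically fill in a struct logic_pio_hwaddr and register it at probe
 * time.  The names my_fwnode, my_host and my_host_ops below are
 * hypothetical placeholders.
 *
 *	static struct logic_pio_hwaddr my_range = {
 *		.fwnode   = my_fwnode,
 *		.hw_start = 0,
 *		.size     = SZ_1K,
 *		.flags    = LOGIC_PIO_INDIRECT,
 *		.ops      = &my_host_ops,
 *		.hostdata = my_host,
 *	};
 *
 *	ret = logic_pio_register_range(&my_range);
 *	if (ret && ret != -EEXIST)
 *		return ret;
 *
 * On success, my_range.io_start holds the logical PIO base assigned to
 * this host.
 */
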
/**
 * logic_pio_unregister_range - unregister a logical PIO range for a host
 * @range: pointer to the IO range which has already been registered.
 *
 * Unregister a previously-registered IO range node.
 */
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
	mutex_lock(&io_range_mutex);
	list_del_rcu(&range->list);
	mutex_unlock(&io_range_mutex);
	synchronize_rcu();
}

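/*
 * Example (illustrative sketch): a host driver is expected to unregister
 * its range on remove, e.g. with the hypothetical node from the
 * registration sketch above.  The synchronize_rcu() above waits for list
 * walkers that started before the removal to finish.
 *
 *	logic_pio_unregister_range(&my_range);
 */
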
/**
 * find_io_range_by_fwnode - find logical PIO range for given FW node
 * @fwnode: FW node handle associated with logical PIO range
 *
 * Returns pointer to node on success, NULL otherwise.
 *
 * Traverse the io_range_list to find the registered node for @fwnode.
 */
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->fwnode == fwnode) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	return found_range;
}

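/*
 * Example (illustrative sketch): a caller holding the fwnode of a
 * previously registered host can recover the assigned logical PIO window,
 * e.g. to build a bus resource.  host_fwnode is a hypothetical handle.
 *
 *	struct logic_pio_hwaddr *range;
 *
 *	range = find_io_range_by_fwnode(host_fwnode);
 *	if (!range)
 *		return -ENODEV;
 *	io_base = range->io_start;
 */
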
/* Return a registered range given an input PIO token */
static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
{
	struct logic_pio_hwaddr *range, *found_range = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (in_range(pio, range->io_start, range->size)) {
			found_range = range;
			break;
		}
	}
	rcu_read_unlock();

	if (!found_range)
		pr_err("PIO entry token 0x%lx invalid\n", pio);

	return found_range;
}

/**
 * logic_pio_to_hwaddr - translate logical PIO to HW address
 * @pio: logical PIO value
 *
 * Returns HW address if valid, ~0 otherwise.
 *
 * Translate the input logical PIO to the corresponding hardware address.
 * The input PIO should be unique in the whole logical PIO space.
 */
resource_size_t logic_pio_to_hwaddr(unsigned long pio)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range(pio);
	if (range)
		return range->hw_start + pio - range->io_start;

	return (resource_size_t)~0;
}

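/*
 * Worked example (illustrative, assuming the hypothetical indirect range
 * from the registration sketch above): if that range was assigned
 * io_start == MMIO_UPPER_LIMIT and was registered with hw_start == 0 and
 * size == SZ_1K, then
 *
 *	logic_pio_to_hwaddr(MMIO_UPPER_LIMIT + 0xe4) == 0xe4
 *
 * i.e. the token's offset into the range is added to the host's hardware
 * base address.
 */
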
/**
 * logic_pio_trans_hwaddr - translate HW address to logical PIO
 * @fwnode: FW node reference for the host
 * @addr: Host-relative HW address
 * @size: size to translate
 *
 * Returns the logical PIO value if successful, ~0UL otherwise.
 */
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
				     resource_size_t addr, resource_size_t size)
{
	struct logic_pio_hwaddr *range;

	range = find_io_range_by_fwnode(fwnode);
	if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
		pr_err("IO range not found or invalid\n");
		return ~0UL;
	}
	if (range->size < size) {
		pr_err("resource size %pa cannot fit in IO range size %pa\n",
		       &size, &range->size);
		return ~0UL;
	}
	return addr - range->hw_start + range->io_start;
}
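
/*
 * Example (illustrative sketch): a host driver translating a child
 * device's bus address into a logical PIO token, e.g. while fixing up a
 * struct resource.  host_fwnode and res are hypothetical.
 *
 *	unsigned long pio;
 *
 *	pio = logic_pio_trans_hwaddr(host_fwnode, res->start,
 *				     resource_size(res));
 *	if (pio == ~0UL)
 *		return -EFAULT;
 */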

/**
 * logic_pio_trans_cpuaddr - translate CPU MMIO address to logical PIO
 * @addr: CPU address within a registered LOGIC_PIO_CPU_MMIO range
 *
 * Returns the logical PIO value if @addr falls inside a registered MMIO
 * range, ~0UL otherwise.
 */
unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
{
	struct logic_pio_hwaddr *range;

	rcu_read_lock();
	list_for_each_entry_rcu(range, &io_range_list, list) {
		if (range->flags != LOGIC_PIO_CPU_MMIO)
			continue;
		if (in_range(addr, range->hw_start, range->size)) {
			unsigned long cpuaddr;

			cpuaddr = addr - range->hw_start + range->io_start;

			rcu_read_unlock();
			return cpuaddr;
		}
	}
	rcu_read_unlock();

	pr_err("addr %pa not registered in io_range_list\n", &addr);

	return ~0UL;
}

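/*
 * BUILD_LOGIC_IO() below generates the logic_in{b,w,l}(), logic_out{b,w,l}()
 * and string variants used when CONFIG_INDIRECT_PIO is enabled: tokens below
 * MMIO_UPPER_LIMIT are serviced by the regular MMIO-backed port accessors
 * (via PCI_IOBASE), while tokens in [MMIO_UPPER_LIMIT, IO_SPACE_LIMIT) are
 * dispatched to the registering host's indirect ops.
 *
 * Example (illustrative sketch): with the hypothetical indirect range from
 * the sketches above, a byte access to port my_range.io_start + 0x4 ends up
 * in my_host_ops.in()/my_host_ops.out():
 *
 *	val = logic_inb(my_range.io_start + 0x4);
 *	logic_outb(val, my_range.io_start + 0x4);
 */
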
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
#define BUILD_LOGIC_IO(bwl, type)					\
type logic_in##bwl(unsigned long addr)					\
{									\
	type ret = (type)~0;						\
									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		ret = _in##bwl(addr);					\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			ret = entry->ops->in(entry->hostdata,		\
					addr, sizeof(type));		\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
	return ret;							\
}									\
									\
void logic_out##bwl(type value, unsigned long addr)			\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		_out##bwl(value, addr);					\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->out(entry->hostdata,		\
					addr, value, sizeof(type));	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}									\
									\
void logic_ins##bwl(unsigned long addr, void *buffer,			\
		    unsigned int count)					\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		reads##bwl(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->ins(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
									\
}									\
									\
void logic_outs##bwl(unsigned long addr, const void *buffer,		\
		     unsigned int count)				\
{									\
	if (addr < MMIO_UPPER_LIMIT) {					\
		writes##bwl(PCI_IOBASE + addr, buffer, count);		\
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) {	\
		struct logic_pio_hwaddr *entry = find_io_range(addr);	\
									\
		if (entry)						\
			entry->ops->outs(entry->hostdata,		\
				addr, buffer, sizeof(type), count);	\
		else							\
			WARN_ON_ONCE(1);				\
	}								\
}

BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */