// SPDX-License-Identifier: GPL-2.0
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gfp_types.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <linux/types.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
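
/*
 * Example (illustrative only, not part of this file): a minimal sketch of how
 * a platform driver might call devm_ioremap() from its probe routine.  The
 * driver name, foo_probe() and FOO_CTRL_REG are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		void __iomem *regs;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *		if (!res)
 *			return -ENODEV;
 *
 *		regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		writel(0x1, regs + FOO_CTRL_REG);	// FOO_CTRL_REG is made up
 *		return 0;
 *	}
 *
 * No explicit iounmap() is needed; the mapping is released on driver detach.
 * Note that, unlike devm_ioremap_resource(), this does not request the
 * memory region first.
 */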

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);
93  
94  /**
95   * devm_ioremap_wc - Managed ioremap_wc()
96   * @dev: Generic device to remap IO address for
97   * @offset: Resource address to map
98   * @size: Size of map
99   *
100   * Managed ioremap_wc().  Map is automatically unmapped on driver detach.
101   */
devm_ioremap_wc(struct device * dev,resource_size_t offset,resource_size_t size)102  void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
103  			      resource_size_t size)
104  {
105  	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
106  }
107  EXPORT_SYMBOL(devm_ioremap_wc);
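
/*
 * Example (illustrative only): write-combined mappings are typically used for
 * frame buffers or other streaming-write apertures rather than control
 * registers.  A PCI driver might do something like the sketch below; the BAR
 * index and the priv layout are assumptions, not taken from this file.
 *
 *	priv->fb = devm_ioremap_wc(&pdev->dev,
 *				   pci_resource_start(pdev, 1),
 *				   pci_resource_len(pdev, 1));
 *	if (!priv->fb)
 *		return -ENOMEM;
 *
 * Writes through a WC mapping may be combined and reordered, so fence them
 * explicitly where ordering matters.
 */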

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
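
/*
 * Example (illustrative only): devm_iounmap() is only needed when a mapping
 * must go away before driver detach, e.g. when the same range is remapped
 * with different attributes at runtime.  The names below are hypothetical.
 *
 *	base = devm_ioremap(dev, res->start, resource_size(res));
 *	...
 *	devm_iounmap(dev, base);	// drops the mapping and its devres entry
 *	base = devm_ioremap_wc(dev, res->start, resource_size(res));
 *
 * In the common case the mapping is simply left in place and released
 * automatically on detach.
 */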

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;
	int ret;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		ret = dev_err_probe(dev, -EINVAL, "invalid resource %pR\n", res);
		return IOMEM_ERR_PTR(ret);
	}

	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
		type = DEVM_IOREMAP_NP;

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name) {
		ret = dev_err_probe(dev, -ENOMEM, "can't generate pretty name for resource %pR\n", res);
		return IOMEM_ERR_PTR(ret);
	}

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		ret = dev_err_probe(dev, -EBUSY, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(ret);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		devm_release_mem_region(dev, res->start, size);
		ret = dev_err_probe(dev, -ENOMEM, "ioremap failed for resource %pR\n", res);
		return IOMEM_ERR_PTR(ret);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/*
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev:	The device "managing" the resource
 * @node:	The device-tree node where the resource resides
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please Note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped.  If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			       unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
				dev_to_node(dev));
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
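
/*
 * Example (illustrative only): a legacy-I/O style driver might map a small,
 * fixed port range in probe.  The base port and length are made-up values.
 *
 *	void __iomem *io;
 *
 *	io = devm_ioport_map(&pdev->dev, 0x3f8, 8);
 *	if (!io)
 *		return -ENOMEM;
 *	status = ioread8(io + 5);	// access only via ioread*()/iowrite*()
 *
 * The returned cookie must never be dereferenced directly; it is only valid
 * as an argument to the ioread/iowrite accessors.
 */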

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
	arch_phys_wc_del(*((int *)res));
}

/**
 * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
 * @dev: Managed device
 * @base: Memory base address
 * @size: Size of memory range
 *
 * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
 * See arch_phys_wc_add() for more information.
 */
int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
{
	int *mtrr;
	int ret;

	mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
				 dev_to_node(dev));
	if (!mtrr)
		return -ENOMEM;

	ret = arch_phys_wc_add(base, size);
	if (ret < 0) {
		devres_free(mtrr);
		return ret;
	}

	*mtrr = ret;
	devres_add(dev, mtrr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_phys_wc_add);
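
/*
 * Example (illustrative only): graphics drivers commonly add a WC MTRR
 * covering the VRAM aperture they have just mapped.  The aperture fields
 * below belong to a hypothetical driver-private structure.
 *
 *	priv->vram_mtrr = devm_arch_phys_wc_add(dev, priv->aper_base,
 *						priv->aper_size);
 *
 * Callers typically treat a negative return as non-fatal: where PAT is in
 * use, arch_phys_wc_add() is a no-op and the attributes from ioremap_wc()
 * already apply.
 */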

struct arch_io_reserve_memtype_wc_devres {
	resource_size_t start;
	resource_size_t size;
};

static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
{
	const struct arch_io_reserve_memtype_wc_devres *this = res;

	arch_io_free_memtype_wc(this->start, this->size);
}

/**
 * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
 * @dev: Managed device
 * @start: Memory base address
 * @size: Size of memory range
 *
 * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
 * and sets up a release callback.  See arch_io_reserve_memtype_wc() for more
 * information.
 */
int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
				    resource_size_t size)
{
	struct arch_io_reserve_memtype_wc_devres *dr;
	int ret;

	dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
			       dev_to_node(dev));
	if (!dr)
		return -ENOMEM;

	ret = arch_io_reserve_memtype_wc(start, size);
	if (ret < 0) {
		devres_free(dr);
		return ret;
	}

	dr->start = start;
	dr->size = size;
	devres_add(dev, dr);

	return ret;
}
EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
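
/*
 * Example (illustrative only): the memtype reservation is typically paired
 * with the MTRR helper above when bringing up a VRAM/aperture range; the
 * field names are hypothetical.
 *
 *	ret = devm_arch_io_reserve_memtype_wc(dev, priv->aper_base,
 *					      priv->aper_size);
 *	if (ret)
 *		return ret;
 *
 *	priv->vram_mtrr = devm_arch_phys_wc_add(dev, priv->aper_base,
 *						priv->aper_size);
 *
 * Both the memtype reservation and the MTRR are dropped automatically when
 * the device is detached.
 */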