1  // SPDX-License-Identifier: GPL-2.0
2  #include <linux/device.h>
3  #include <linux/pci.h>
4  #include "pci.h"
5  
6  /*
7   * On the state of PCI's devres implementation:
8   *
9   * The older devres API for PCI has two significant problems:
10   *
11   * 1. It is very strongly tied to the statically allocated mapping table in
12   *    struct pcim_iomap_devres below. This is mostly solved in the sense of the
13   *    pcim_ functions in this file providing things like ranged mapping by
14   *    bypassing this table, whereas the functions that were present in the old
15   *    API still enter the mapping addresses into the table for users of the old
16   *    API.
17   *
18   * 2. The region-request-functions in pci.c do become managed IF the device has
19   *    been enabled with pcim_enable_device() instead of pci_enable_device().
20   *    This resulted in the API becoming inconsistent: Some functions have an
21   *    obviously managed counter-part (e.g., pci_iomap() <-> pcim_iomap()),
22   *    whereas some don't and are never managed, while others don't and are
23   *    _sometimes_ managed (e.g. pci_request_region()).
24   *
25   *    Consequently, in the new API, region requests performed by the pcim_
26   *    functions are automatically cleaned up through the devres callback
27   *    pcim_addr_resource_release().
28   *
29   *    Users of pcim_enable_device() + pci_*region*() are redirected in
30   *    pci.c to the managed functions here in this file. This isn't exactly
31   *    perfect, but the only alternative way would be to port ALL drivers
32   *    using said combination to pcim_ functions.
33   *
34   * TODO:
35   * Remove the legacy table entirely once all calls to pcim_iomap_table() in
36   * the kernel have been removed.
37   */
38  
/*
 * Legacy struct storing addresses to whole mapped BARs.
 */
struct pcim_iomap_devres {
	/* Per-BAR mapping base address; NULL for BARs that are not mapped. */
	void __iomem *table[PCI_STD_NUM_BARS];
};
45  
/* Used to restore the old INTx state on driver detach. */
struct pcim_intx_devres {
	/* INTx enable state to restore in the devres release callback. */
	int orig_intx;
};
50  
/* Discriminator for struct pcim_addr_devres: what must be undone on release. */
enum pcim_addr_devres_type {
	/* Default initializer. */
	PCIM_ADDR_DEVRES_TYPE_INVALID,

	/* A requested region spanning an entire BAR. */
	PCIM_ADDR_DEVRES_TYPE_REGION,

	/*
	 * A requested region spanning an entire BAR, and a mapping for
	 * the entire BAR.
	 */
	PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING,

	/*
	 * A mapping within a BAR, either spanning the whole BAR or just a
	 * range.  Without a requested region.
	 */
	PCIM_ADDR_DEVRES_TYPE_MAPPING,
};
70  
/*
 * This struct envelops IO or MEM addresses, i.e., mappings and region
 * requests, because those are very frequently requested and released
 * together.
 */
struct pcim_addr_devres {
	enum pcim_addr_devres_type type;
	/* Mapping base address; valid for the *_MAPPING types. */
	void __iomem *baseaddr;
	/* Range within the BAR for ranged requests. */
	unsigned long offset;
	unsigned long len;
	/* BAR index, or -1 if no BAR is associated (see pcim_addr_devres_clear()). */
	int bar;
};
83  
pcim_addr_devres_clear(struct pcim_addr_devres * res)84  static inline void pcim_addr_devres_clear(struct pcim_addr_devres *res)
85  {
86  	memset(res, 0, sizeof(*res));
87  	res->bar = -1;
88  }
89  
90  /*
91   * The following functions, __pcim_*_region*, exist as counterparts to the
92   * versions from pci.c - which, unfortunately, can be in "hybrid mode", i.e.,
93   * sometimes managed, sometimes not.
94   *
95   * To separate the APIs cleanly, we define our own, simplified versions here.
96   */
97  
/**
 * __pcim_request_region_range - Request a ranged region
 * @pdev: PCI device the region belongs to
 * @bar: BAR the range is within
 * @offset: offset from the BAR's start address
 * @maxlen: length in bytes, beginning at @offset
 * @name: name associated with the request
 * @req_flags: flags for the request, e.g., for kernel-exclusive requests
 *
 * Returns: 0 on success, a negative error code on failure.
 *
 * Request a range within a device's PCI BAR.  Sanity check the input.
 */
static int __pcim_request_region_range(struct pci_dev *pdev, int bar,
				       unsigned long offset,
				       unsigned long maxlen,
				       const char *name, int req_flags)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);
	unsigned long dev_flags = pci_resource_flags(pdev, bar);

	/*
	 * An unused BAR counts as success so that callers may blindly
	 * request all BARs without checking which ones are implemented.
	 */
	if (start == 0 || len == 0) /* Unused BAR. */
		return 0;
	if (len <= offset)
		return -EINVAL;

	start += offset;
	len -= offset;

	/* @maxlen == 0 means "everything from @offset to the BAR's end". */
	if (len > maxlen && maxlen != 0)
		len = maxlen;

	if (dev_flags & IORESOURCE_IO) {
		if (!request_region(start, len, name))
			return -EBUSY;
	} else if (dev_flags & IORESOURCE_MEM) {
		if (!__request_mem_region(start, len, name, req_flags))
			return -EBUSY;
	} else {
		/* That's not a device we can request anything on. */
		return -ENODEV;
	}

	return 0;
}
144  
__pcim_release_region_range(struct pci_dev * pdev,int bar,unsigned long offset,unsigned long maxlen)145  static void __pcim_release_region_range(struct pci_dev *pdev, int bar,
146  					unsigned long offset,
147  					unsigned long maxlen)
148  {
149  	resource_size_t start = pci_resource_start(pdev, bar);
150  	resource_size_t len = pci_resource_len(pdev, bar);
151  	unsigned long flags = pci_resource_flags(pdev, bar);
152  
153  	if (len <= offset || start == 0)
154  		return;
155  
156  	if (len == 0 || maxlen == 0) /* This an unused BAR. Do nothing. */
157  		return;
158  
159  	start += offset;
160  	len -= offset;
161  
162  	if (len > maxlen)
163  		len = maxlen;
164  
165  	if (flags & IORESOURCE_IO)
166  		release_region(start, len);
167  	else if (flags & IORESOURCE_MEM)
168  		release_mem_region(start, len);
169  }
170  
/* Request a whole BAR; thin wrapper around the ranged variant. */
static int __pcim_request_region(struct pci_dev *pdev, int bar,
				 const char *name, int flags)
{
	return __pcim_request_region_range(pdev, bar, 0,
					   pci_resource_len(pdev, bar),
					   name, flags);
}
179  
/* Release a whole BAR; thin wrapper around the ranged variant. */
static void __pcim_release_region(struct pci_dev *pdev, int bar)
{
	__pcim_release_region_range(pdev, bar, 0,
				    pci_resource_len(pdev, bar));
}
187  
/* Devres release callback: undo whatever a pcim_addr_devres represents. */
static void pcim_addr_resource_release(struct device *dev, void *resource_raw)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcim_addr_devres *res = resource_raw;

	switch (res->type) {
	case PCIM_ADDR_DEVRES_TYPE_REGION:
		__pcim_release_region(pdev, res->bar);
		break;
	case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
		/* Unmap before releasing the region the mapping lives in. */
		pci_iounmap(pdev, res->baseaddr);
		__pcim_release_region(pdev, res->bar);
		break;
	case PCIM_ADDR_DEVRES_TYPE_MAPPING:
		pci_iounmap(pdev, res->baseaddr);
		break;
	default:
		/* PCIM_ADDR_DEVRES_TYPE_INVALID: nothing to undo. */
		break;
	}
}
208  
pcim_addr_devres_alloc(struct pci_dev * pdev)209  static struct pcim_addr_devres *pcim_addr_devres_alloc(struct pci_dev *pdev)
210  {
211  	struct pcim_addr_devres *res;
212  
213  	res = devres_alloc_node(pcim_addr_resource_release, sizeof(*res),
214  				GFP_KERNEL, dev_to_node(&pdev->dev));
215  	if (res)
216  		pcim_addr_devres_clear(res);
217  	return res;
218  }
219  
/*
 * Just for consistency and readability. Frees an entry allocated with
 * pcim_addr_devres_alloc() that was never handed to devres_add().
 */
static inline void pcim_addr_devres_free(struct pcim_addr_devres *res)
{
	devres_free(res);
}
225  
226  /*
227   * Used by devres to identify a pcim_addr_devres.
228   */
pcim_addr_resources_match(struct device * dev,void * a_raw,void * b_raw)229  static int pcim_addr_resources_match(struct device *dev,
230  				     void *a_raw, void *b_raw)
231  {
232  	struct pcim_addr_devres *a, *b;
233  
234  	a = a_raw;
235  	b = b_raw;
236  
237  	if (a->type != b->type)
238  		return 0;
239  
240  	switch (a->type) {
241  	case PCIM_ADDR_DEVRES_TYPE_REGION:
242  	case PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING:
243  		return a->bar == b->bar;
244  	case PCIM_ADDR_DEVRES_TYPE_MAPPING:
245  		return a->baseaddr == b->baseaddr;
246  	default:
247  		return 0;
248  	}
249  }
250  
/* Devres release callback for devm_pci_remap_iospace(). */
static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
{
	struct resource **res = ptr;

	pci_unmap_iospace(*res);
}
257  
258  /**
259   * devm_pci_remap_iospace - Managed pci_remap_iospace()
260   * @dev: Generic device to remap IO address for
261   * @res: Resource describing the I/O space
262   * @phys_addr: physical address of range to be mapped
263   *
264   * Managed pci_remap_iospace().  Map is automatically unmapped on driver
265   * detach.
266   */
devm_pci_remap_iospace(struct device * dev,const struct resource * res,phys_addr_t phys_addr)267  int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
268  			   phys_addr_t phys_addr)
269  {
270  	const struct resource **ptr;
271  	int error;
272  
273  	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
274  	if (!ptr)
275  		return -ENOMEM;
276  
277  	error = pci_remap_iospace(res, phys_addr);
278  	if (error) {
279  		devres_free(ptr);
280  	} else	{
281  		*ptr = res;
282  		devres_add(dev, ptr);
283  	}
284  
285  	return error;
286  }
287  EXPORT_SYMBOL(devm_pci_remap_iospace);
288  
289  /**
290   * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
291   * @dev: Generic device to remap IO address for
292   * @offset: Resource address to map
293   * @size: Size of map
294   *
295   * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
296   * detach.
297   */
devm_pci_remap_cfgspace(struct device * dev,resource_size_t offset,resource_size_t size)298  void __iomem *devm_pci_remap_cfgspace(struct device *dev,
299  				      resource_size_t offset,
300  				      resource_size_t size)
301  {
302  	void __iomem **ptr, *addr;
303  
304  	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
305  	if (!ptr)
306  		return NULL;
307  
308  	addr = pci_remap_cfgspace(offset, size);
309  	if (addr) {
310  		*ptr = addr;
311  		devres_add(dev, ptr);
312  	} else
313  		devres_free(ptr);
314  
315  	return addr;
316  }
317  EXPORT_SYMBOL(devm_pci_remap_cfgspace);
318  
/**
 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
 * @dev: generic device to handle the resource for
 * @res: configuration space resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps with pci_remap_cfgspace() API that ensures the
 * proper PCI configuration space memory attributes are guaranteed.
 *
 * All operations are managed and will be undone on driver detach.
 *
 * Returns a pointer to the remapped memory or an IOMEM_ERR_PTR() encoded error
 * code on failure. Usage example::
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
					  struct resource *res)
{
	resource_size_t size;
	const char *name;
	void __iomem *dest_ptr;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	/* Build "<device> <resource>" as the region name; both are devm-freed. */
	if (res->name)
		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
				      res->name);
	else
		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		/* Undo the request immediately rather than waiting for detach. */
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}
EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
377  
/* devm action callback registered by pcim_set_mwi(): disable MWI again. */
static void __pcim_clear_mwi(void *pdev_raw)
{
	struct pci_dev *pdev = pdev_raw;

	pci_clear_mwi(pdev);
}
384  
/**
 * pcim_set_mwi - a device-managed pci_set_mwi()
 * @pdev: the PCI device for which MWI is enabled
 *
 * Managed pci_set_mwi(). MWI is automatically disabled again on driver
 * detach.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pcim_set_mwi(struct pci_dev *pdev)
{
	int ret;

	/*
	 * Register the cleanup action first, so a devres allocation failure
	 * can never leave MWI enabled without a registered undo.
	 */
	ret = devm_add_action(&pdev->dev, __pcim_clear_mwi, pdev);
	if (ret != 0)
		return ret;

	ret = pci_set_mwi(pdev);
	if (ret != 0)
		devm_remove_action(&pdev->dev, __pcim_clear_mwi, pdev);

	return ret;
}
EXPORT_SYMBOL(pcim_set_mwi);
408  
mask_contains_bar(int mask,int bar)409  static inline bool mask_contains_bar(int mask, int bar)
410  {
411  	return mask & BIT(bar);
412  }
413  
414  /*
415   * This is a copy of pci_intx() used to bypass the problem of recursive
416   * function calls due to the hybrid nature of pci_intx().
417   */
__pcim_intx(struct pci_dev * pdev,int enable)418  static void __pcim_intx(struct pci_dev *pdev, int enable)
419  {
420  	u16 pci_command, new;
421  
422  	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
423  
424  	if (enable)
425  		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
426  	else
427  		new = pci_command | PCI_COMMAND_INTX_DISABLE;
428  
429  	if (new != pci_command)
430  		pci_write_config_word(pdev, PCI_COMMAND, new);
431  }
432  
/* Devres release callback: restore the recorded INTx enable state. */
static void pcim_intx_restore(struct device *dev, void *data)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcim_intx_devres *res = data;

	__pcim_intx(pdev, res->orig_intx);
}
440  
get_or_create_intx_devres(struct device * dev)441  static struct pcim_intx_devres *get_or_create_intx_devres(struct device *dev)
442  {
443  	struct pcim_intx_devres *res;
444  
445  	res = devres_find(dev, pcim_intx_restore, NULL, NULL);
446  	if (res)
447  		return res;
448  
449  	res = devres_alloc(pcim_intx_restore, sizeof(*res), GFP_KERNEL);
450  	if (res)
451  		devres_add(dev, res);
452  
453  	return res;
454  }
455  
456  /**
457   * pcim_intx - managed pci_intx()
458   * @pdev: the PCI device to operate on
459   * @enable: boolean: whether to enable or disable PCI INTx
460   *
461   * Returns: 0 on success, -ENOMEM on error.
462   *
463   * Enable/disable PCI INTx for device @pdev.
464   * Restore the original state on driver detach.
465   */
pcim_intx(struct pci_dev * pdev,int enable)466  int pcim_intx(struct pci_dev *pdev, int enable)
467  {
468  	struct pcim_intx_devres *res;
469  
470  	res = get_or_create_intx_devres(&pdev->dev);
471  	if (!res)
472  		return -ENOMEM;
473  
474  	res->orig_intx = !enable;
475  	__pcim_intx(pdev, enable);
476  
477  	return 0;
478  }
479  
/* devm action callback registered by pcim_enable_device(). */
static void pcim_disable_device(void *pdev_raw)
{
	struct pci_dev *pdev = pdev_raw;

	/* Pinned devices (see pcim_pin_device()) stay enabled on detach. */
	if (!pdev->pinned)
		pci_disable_device(pdev);

	pdev->is_managed = false;
}
489  
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Returns: 0 on success, negative error code on failure.
 *
 * Managed pci_enable_device(). Device will automatically be disabled on
 * driver detach.
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	int ret;

	ret = devm_add_action(&pdev->dev, pcim_disable_device, pdev);
	if (ret != 0)
		return ret;

	/*
	 * We prefer removing the action in case of an error over
	 * devm_add_action_or_reset() because the latter could theoretically be
	 * disturbed by users having pinned the device too soon.
	 */
	ret = pci_enable_device(pdev);
	if (ret != 0) {
		devm_remove_action(&pdev->dev, pcim_disable_device, pdev);
		return ret;
	}

	/*
	 * Mark the device managed; pci.c redirects the hybrid region-request
	 * functions to the managed pcim_ variants based on this flag (see the
	 * comment at the top of this file).
	 */
	pdev->is_managed = true;

	return ret;
}
EXPORT_SYMBOL(pcim_enable_device);
523  
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on driver
 * detach. @pdev must have been enabled with pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	/* Checked by pcim_disable_device() before disabling on detach. */
	pdev->pinned = true;
}
EXPORT_SYMBOL(pcim_pin_device);
536  
/*
 * No-op devres release callback; it only serves as the identifier by which
 * pcim_iomap_table() finds the legacy table via devres_find()/devres_get().
 */
static void pcim_iomap_release(struct device *gendev, void *res)
{
	/*
	 * Do nothing. This is legacy code.
	 *
	 * Cleanup of the mappings is now done directly through the callbacks
	 * registered when creating them.
	 */
}
546  
/**
 * pcim_iomap_table - access iomap allocation table (DEPRECATED)
 * @pdev: PCI device to access iomap table for
 *
 * Returns:
 * Const pointer to array of __iomem pointers on success, NULL on failure.
 *
 * Access iomap allocation table for @dev.  If iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and guaranteed to succeed once
 * allocated.
 *
 * This function is DEPRECATED. Do not use it in new code. Instead, obtain a
 * mapping's address directly from one of the pcim_* mapping functions. For
 * example:
 * void __iomem \*mappy = pcim_iomap(pdev, bar, length);
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
				   dev_to_node(&pdev->dev));
	if (!new_dr)
		return NULL;
	/*
	 * devres_get() adds @new_dr, or frees it and returns the existing
	 * entry if one was added concurrently - so @dr is always valid here.
	 */
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);
584  
585  /*
586   * Fill the legacy mapping-table, so that drivers using the old API can
587   * still get a BAR's mapping address through pcim_iomap_table().
588   */
pcim_add_mapping_to_legacy_table(struct pci_dev * pdev,void __iomem * mapping,int bar)589  static int pcim_add_mapping_to_legacy_table(struct pci_dev *pdev,
590  					    void __iomem *mapping, int bar)
591  {
592  	void __iomem **legacy_iomap_table;
593  
594  	if (bar >= PCI_STD_NUM_BARS)
595  		return -EINVAL;
596  
597  	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
598  	if (!legacy_iomap_table)
599  		return -ENOMEM;
600  
601  	/* The legacy mechanism doesn't allow for duplicate mappings. */
602  	WARN_ON(legacy_iomap_table[bar]);
603  
604  	legacy_iomap_table[bar] = mapping;
605  
606  	return 0;
607  }
608  
609  /*
610   * Remove a mapping. The table only contains whole-BAR mappings, so this will
611   * never interfere with ranged mappings.
612   */
pcim_remove_mapping_from_legacy_table(struct pci_dev * pdev,void __iomem * addr)613  static void pcim_remove_mapping_from_legacy_table(struct pci_dev *pdev,
614  						  void __iomem *addr)
615  {
616  	int bar;
617  	void __iomem **legacy_iomap_table;
618  
619  	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
620  	if (!legacy_iomap_table)
621  		return;
622  
623  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
624  		if (legacy_iomap_table[bar] == addr) {
625  			legacy_iomap_table[bar] = NULL;
626  			return;
627  		}
628  	}
629  }
630  
631  /*
632   * The same as pcim_remove_mapping_from_legacy_table(), but identifies the
633   * mapping by its BAR index.
634   */
pcim_remove_bar_from_legacy_table(struct pci_dev * pdev,int bar)635  static void pcim_remove_bar_from_legacy_table(struct pci_dev *pdev, int bar)
636  {
637  	void __iomem **legacy_iomap_table;
638  
639  	if (bar >= PCI_STD_NUM_BARS)
640  		return;
641  
642  	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);
643  	if (!legacy_iomap_table)
644  		return;
645  
646  	legacy_iomap_table[bar] = NULL;
647  }
648  
/**
 * pcim_iomap - Managed pcim_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Returns: __iomem pointer on success, NULL on failure.
 *
 * Managed pci_iomap(). Map is automatically unmapped on driver detach. If
 * desired, unmap manually only with pcim_iounmap().
 *
 * This SHOULD only be used once per BAR.
 *
 * NOTE:
 * Contrary to the other pcim_* functions, this function does not return an
 * IOMEM_ERR_PTR() on failure, but a simple NULL. This is done for backwards
 * compatibility.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem *mapping;
	struct pcim_addr_devres *res;

	res = pcim_addr_devres_alloc(pdev);
	if (!res)
		return NULL;
	res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;

	mapping = pci_iomap(pdev, bar, maxlen);
	if (!mapping)
		goto err_iomap;
	res->baseaddr = mapping;

	/* Keep the legacy table in sync for pcim_iomap_table() users. */
	if (pcim_add_mapping_to_legacy_table(pdev, mapping, bar) != 0)
		goto err_table;

	devres_add(&pdev->dev, res);
	return mapping;

err_table:
	pci_iounmap(pdev, mapping);
err_iomap:
	pcim_addr_devres_free(res);
	return NULL;
}
EXPORT_SYMBOL(pcim_iomap);
695  
/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap(). @addr must have been mapped using a pcim_* mapping
 * function.
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	struct pcim_addr_devres res_searched;

	/* Build a search key; MAPPING entries match on baseaddr. */
	pcim_addr_devres_clear(&res_searched);
	res_searched.type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
	res_searched.baseaddr = addr;

	if (devres_release(&pdev->dev, pcim_addr_resource_release,
			pcim_addr_resources_match, &res_searched) != 0) {
		/* Doesn't exist. User passed nonsense. */
		return;
	}

	/* Only reached if the mapping existed and was just released. */
	pcim_remove_mapping_from_legacy_table(pdev, addr);
}
EXPORT_SYMBOL(pcim_iounmap);
721  
/**
 * pcim_iomap_region - Request and iomap a PCI BAR
 * @pdev: PCI device to map IO resources for
 * @bar: Index of a BAR to map
 * @name: Name associated with the request
 *
 * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
 *
 * Mapping and region will get automatically released on driver detach. If
 * desired, release manually only with pcim_iounmap_region().
 */
void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
				       const char *name)
{
	int ret;
	struct pcim_addr_devres *res;

	res = pcim_addr_devres_alloc(pdev);
	if (!res)
		return IOMEM_ERR_PTR(-ENOMEM);

	/* One devres entry covers both the region and the mapping. */
	res->type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
	res->bar = bar;

	ret = __pcim_request_region(pdev, bar, name, 0);
	if (ret != 0)
		goto err_region;

	/* maxlen 0: map the whole BAR. */
	res->baseaddr = pci_iomap(pdev, bar, 0);
	if (!res->baseaddr) {
		ret = -EINVAL;
		goto err_iomap;
	}

	devres_add(&pdev->dev, res);
	return res->baseaddr;

err_iomap:
	__pcim_release_region(pdev, bar);
err_region:
	pcim_addr_devres_free(res);

	return IOMEM_ERR_PTR(ret);
}
EXPORT_SYMBOL(pcim_iomap_region);
767  
768  /**
769   * pcim_iounmap_region - Unmap and release a PCI BAR
770   * @pdev: PCI device to operate on
771   * @bar: Index of BAR to unmap and release
772   *
773   * Unmap a BAR and release its region manually. Only pass BARs that were
774   * previously mapped by pcim_iomap_region().
775   */
pcim_iounmap_region(struct pci_dev * pdev,int bar)776  static void pcim_iounmap_region(struct pci_dev *pdev, int bar)
777  {
778  	struct pcim_addr_devres res_searched;
779  
780  	pcim_addr_devres_clear(&res_searched);
781  	res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION_MAPPING;
782  	res_searched.bar = bar;
783  
784  	devres_release(&pdev->dev, pcim_addr_resource_release,
785  			pcim_addr_resources_match, &res_searched);
786  }
787  
788  /**
789   * pcim_iomap_regions - Request and iomap PCI BARs (DEPRECATED)
790   * @pdev: PCI device to map IO resources for
791   * @mask: Mask of BARs to request and iomap
792   * @name: Name associated with the requests
793   *
794   * Returns: 0 on success, negative error code on failure.
795   *
796   * Request and iomap regions specified by @mask.
797   *
798   * This function is DEPRECATED. Do not use it in new code.
799   * Use pcim_iomap_region() instead.
800   */
pcim_iomap_regions(struct pci_dev * pdev,int mask,const char * name)801  int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
802  {
803  	int ret;
804  	int bar;
805  	void __iomem *mapping;
806  
807  	for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
808  		if (!mask_contains_bar(mask, bar))
809  			continue;
810  
811  		mapping = pcim_iomap_region(pdev, bar, name);
812  		if (IS_ERR(mapping)) {
813  			ret = PTR_ERR(mapping);
814  			goto err;
815  		}
816  		ret = pcim_add_mapping_to_legacy_table(pdev, mapping, bar);
817  		if (ret != 0)
818  			goto err;
819  	}
820  
821  	return 0;
822  
823  err:
824  	while (--bar >= 0) {
825  		pcim_iounmap_region(pdev, bar);
826  		pcim_remove_bar_from_legacy_table(pdev, bar);
827  	}
828  
829  	return ret;
830  }
831  EXPORT_SYMBOL(pcim_iomap_regions);
832  
_pcim_request_region(struct pci_dev * pdev,int bar,const char * name,int request_flags)833  static int _pcim_request_region(struct pci_dev *pdev, int bar, const char *name,
834  				int request_flags)
835  {
836  	int ret;
837  	struct pcim_addr_devres *res;
838  
839  	res = pcim_addr_devres_alloc(pdev);
840  	if (!res)
841  		return -ENOMEM;
842  	res->type = PCIM_ADDR_DEVRES_TYPE_REGION;
843  	res->bar = bar;
844  
845  	ret = __pcim_request_region(pdev, bar, name, request_flags);
846  	if (ret != 0) {
847  		pcim_addr_devres_free(res);
848  		return ret;
849  	}
850  
851  	devres_add(&pdev->dev, res);
852  	return 0;
853  }
854  
/**
 * pcim_request_region - Request a PCI BAR
 * @pdev: PCI device to request region for
 * @bar: Index of BAR to request
 * @name: Name associated with the request
 *
 * Returns: 0 on success, a negative error code on failure.
 *
 * Request region specified by @bar.
 *
 * The region will automatically be released on driver detach. If desired,
 * release manually only with pcim_release_region().
 */
int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
{
	return _pcim_request_region(pdev, bar, name, 0);
}
EXPORT_SYMBOL(pcim_request_region);
873  
/**
 * pcim_request_region_exclusive - Request a PCI BAR exclusively
 * @pdev: PCI device to request region for
 * @bar: Index of BAR to request
 * @name: Name associated with the request
 *
 * Returns: 0 on success, a negative error code on failure.
 *
 * Request region specified by @bar exclusively.
 *
 * The region will automatically be released on driver detach. If desired,
 * release manually only with pcim_release_region().
 */
int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
{
	return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
}
891  
892  /**
893   * pcim_release_region - Release a PCI BAR
894   * @pdev: PCI device to operate on
895   * @bar: Index of BAR to release
896   *
897   * Release a region manually that was previously requested by
898   * pcim_request_region().
899   */
pcim_release_region(struct pci_dev * pdev,int bar)900  void pcim_release_region(struct pci_dev *pdev, int bar)
901  {
902  	struct pcim_addr_devres res_searched;
903  
904  	pcim_addr_devres_clear(&res_searched);
905  	res_searched.type = PCIM_ADDR_DEVRES_TYPE_REGION;
906  	res_searched.bar = bar;
907  
908  	devres_release(&pdev->dev, pcim_addr_resource_release,
909  			pcim_addr_resources_match, &res_searched);
910  }
911  
912  
913  /**
914   * pcim_release_all_regions - Release all regions of a PCI-device
915   * @pdev: the PCI device
916   *
917   * Release all regions previously requested through pcim_request_region()
918   * or pcim_request_all_regions().
919   *
920   * Can be called from any context, i.e., not necessarily as a counterpart to
921   * pcim_request_all_regions().
922   */
pcim_release_all_regions(struct pci_dev * pdev)923  static void pcim_release_all_regions(struct pci_dev *pdev)
924  {
925  	int bar;
926  
927  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
928  		pcim_release_region(pdev, bar);
929  }
930  
931  /**
932   * pcim_request_all_regions - Request all regions
933   * @pdev: PCI device to map IO resources for
934   * @name: name associated with the request
935   *
936   * Returns: 0 on success, negative error code on failure.
937   *
938   * Requested regions will automatically be released at driver detach. If
939   * desired, release individual regions with pcim_release_region() or all of
940   * them at once with pcim_release_all_regions().
941   */
pcim_request_all_regions(struct pci_dev * pdev,const char * name)942  static int pcim_request_all_regions(struct pci_dev *pdev, const char *name)
943  {
944  	int ret;
945  	int bar;
946  
947  	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
948  		ret = pcim_request_region(pdev, bar, name);
949  		if (ret != 0)
950  			goto err;
951  	}
952  
953  	return 0;
954  
955  err:
956  	pcim_release_all_regions(pdev);
957  
958  	return ret;
959  }
960  
/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 *			(DEPRECATED)
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name associated with the requests
 *
 * Returns: 0 on success, negative error code on failure.
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 *
 * To release these resources manually, call pcim_release_region() for the
 * regions and pcim_iounmap() for the mappings.
 *
 * This function is DEPRECATED. Don't use it in new code. Instead, use one
 * of the pcim_* region request functions in combination with a pcim_*
 * mapping function.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	int bar;
	int ret;
	void __iomem **legacy_iomap_table;

	/* All BARs are requested, even those not selected in @mask. */
	ret = pcim_request_all_regions(pdev, name);
	if (ret != 0)
		return ret;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!mask_contains_bar(mask, bar))
			continue;
		if (!pcim_iomap(pdev, bar, 0))
			goto err;
	}

	return 0;

err:
	/*
	 * If bar is larger than 0, then pcim_iomap() above has most likely
	 * failed because of -EINVAL. If it is equal 0, most likely the table
	 * couldn't be created, indicating -ENOMEM.
	 */
	ret = bar > 0 ? -EINVAL : -ENOMEM;
	legacy_iomap_table = (void __iomem **)pcim_iomap_table(pdev);

	/*
	 * The failing BAR created no mapping, so start the rollback at the
	 * previous index. Unmapped BARs have NULL table entries, which
	 * pcim_iounmap() rejects harmlessly.
	 */
	while (--bar >= 0)
		pcim_iounmap(pdev, legacy_iomap_table[bar]);

	pcim_release_all_regions(pdev);

	return ret;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
1016  
1017  /**
1018   * pcim_iounmap_regions - Unmap and release PCI BARs
1019   * @pdev: PCI device to map IO resources for
1020   * @mask: Mask of BARs to unmap and release
1021   *
1022   * Unmap and release regions specified by @mask.
1023   */
pcim_iounmap_regions(struct pci_dev * pdev,int mask)1024  void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
1025  {
1026  	int i;
1027  
1028  	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
1029  		if (!mask_contains_bar(mask, i))
1030  			continue;
1031  
1032  		pcim_iounmap_region(pdev, i);
1033  		pcim_remove_bar_from_legacy_table(pdev, i);
1034  	}
1035  }
1036  EXPORT_SYMBOL(pcim_iounmap_regions);
1037  
1038  /**
1039   * pcim_iomap_range - Create a ranged __iomap mapping within a PCI BAR
1040   * @pdev: PCI device to map IO resources for
1041   * @bar: Index of the BAR
1042   * @offset: Offset from the begin of the BAR
1043   * @len: Length in bytes for the mapping
1044   *
1045   * Returns: __iomem pointer on success, an IOMEM_ERR_PTR on failure.
1046   *
1047   * Creates a new IO-Mapping within the specified @bar, ranging from @offset to
1048   * @offset + @len.
1049   *
1050   * The mapping will automatically get unmapped on driver detach. If desired,
1051   * release manually only with pcim_iounmap().
1052   */
pcim_iomap_range(struct pci_dev * pdev,int bar,unsigned long offset,unsigned long len)1053  void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
1054  		unsigned long offset, unsigned long len)
1055  {
1056  	void __iomem *mapping;
1057  	struct pcim_addr_devres *res;
1058  
1059  	res = pcim_addr_devres_alloc(pdev);
1060  	if (!res)
1061  		return IOMEM_ERR_PTR(-ENOMEM);
1062  
1063  	mapping = pci_iomap_range(pdev, bar, offset, len);
1064  	if (!mapping) {
1065  		pcim_addr_devres_free(res);
1066  		return IOMEM_ERR_PTR(-EINVAL);
1067  	}
1068  
1069  	res->type = PCIM_ADDR_DEVRES_TYPE_MAPPING;
1070  	res->baseaddr = mapping;
1071  
1072  	/*
1073  	 * Ranged mappings don't get added to the legacy-table, since the table
1074  	 * only ever keeps track of whole BARs.
1075  	 */
1076  
1077  	devres_add(&pdev->dev, res);
1078  	return mapping;
1079  }
1080  EXPORT_SYMBOL(pcim_iomap_range);
1081