1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * PCI Bus Services, see include/linux/pci.h for further explanation.
4   *
5   * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6   * David Mosberger-Tang
7   *
8   * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
9   */
10  
11  #include <linux/acpi.h>
12  #include <linux/kernel.h>
13  #include <linux/delay.h>
14  #include <linux/dmi.h>
15  #include <linux/init.h>
16  #include <linux/msi.h>
17  #include <linux/of.h>
18  #include <linux/pci.h>
19  #include <linux/pm.h>
20  #include <linux/slab.h>
21  #include <linux/module.h>
22  #include <linux/spinlock.h>
23  #include <linux/string.h>
24  #include <linux/log2.h>
25  #include <linux/logic_pio.h>
26  #include <linux/pm_wakeup.h>
27  #include <linux/device.h>
28  #include <linux/pm_runtime.h>
29  #include <linux/pci_hotplug.h>
30  #include <linux/vmalloc.h>
31  #include <asm/dma.h>
32  #include <linux/aer.h>
33  #include <linux/bitfield.h>
34  #include "pci.h"
35  
36  DEFINE_MUTEX(pci_slot_mutex);
37  
38  const char *pci_power_names[] = {
39  	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
40  };
41  EXPORT_SYMBOL_GPL(pci_power_names);
42  
43  #ifdef CONFIG_X86_32
44  int isa_dma_bridge_buggy;
45  EXPORT_SYMBOL(isa_dma_bridge_buggy);
46  #endif
47  
48  int pci_pci_problems;
49  EXPORT_SYMBOL(pci_pci_problems);
50  
51  unsigned int pci_pm_d3hot_delay;
52  
53  static void pci_pme_list_scan(struct work_struct *work);
54  
55  static LIST_HEAD(pci_pme_list);
56  static DEFINE_MUTEX(pci_pme_list_mutex);
57  static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
58  
59  struct pci_pme_device {
60  	struct list_head list;
61  	struct pci_dev *dev;
62  };
63  
64  #define PME_TIMEOUT 1000 /* How long between PME checks */
65  
66  /*
67   * Following exit from Conventional Reset, devices must be ready within 1 sec
68   * (PCIe r6.0 sec 6.6.1).  A D3cold to D0 transition implies a Conventional
69   * Reset (PCIe r6.0 sec 5.8).
70   */
71  #define PCI_RESET_WAIT 1000 /* msec */
72  
73  /*
74   * Devices may extend the 1 sec period through Request Retry Status
75   * completions (PCIe r6.0 sec 2.3.1).  The spec does not provide an upper
76   * limit, but 60 sec ought to be enough for any device to become
77   * responsive.
78   */
79  #define PCIE_RESET_READY_POLL_MS 60000 /* msec */
80  
pci_dev_d3_sleep(struct pci_dev * dev)81  static void pci_dev_d3_sleep(struct pci_dev *dev)
82  {
83  	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
84  	unsigned int upper;
85  
86  	if (delay_ms) {
87  		/* Use a 20% upper bound, 1ms minimum */
88  		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
89  		usleep_range(delay_ms * USEC_PER_MSEC,
90  			     (delay_ms + upper) * USEC_PER_MSEC);
91  	}
92  }
93  
pci_reset_supported(struct pci_dev * dev)94  bool pci_reset_supported(struct pci_dev *dev)
95  {
96  	return dev->reset_methods[0] != 0;
97  }
98  
99  #ifdef CONFIG_PCI_DOMAINS
100  int pci_domains_supported = 1;
101  #endif
102  
103  #define DEFAULT_CARDBUS_IO_SIZE		(256)
104  #define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
105  /* pci=cbmemsize=nnM,cbiosize=nn can override this */
106  unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
107  unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
108  
109  #define DEFAULT_HOTPLUG_IO_SIZE		(256)
110  #define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
111  #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
112  /* hpiosize=nn can override this */
113  unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
114  /*
115   * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
116   * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
117   * pci=hpmemsize=nnM overrides both
118   */
119  unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
120  unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
121  
122  #define DEFAULT_HOTPLUG_BUS_SIZE	1
123  unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
124  
125  
126  /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
127  #ifdef CONFIG_PCIE_BUS_TUNE_OFF
128  enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
129  #elif defined CONFIG_PCIE_BUS_SAFE
130  enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
131  #elif defined CONFIG_PCIE_BUS_PERFORMANCE
132  enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
133  #elif defined CONFIG_PCIE_BUS_PEER2PEER
134  enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
135  #else
136  enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
137  #endif
138  
139  /*
140   * The default CLS is used if arch didn't set CLS explicitly and not
141   * all pci devices agree on the same value.  Arch can override either
142   * the dfl or actual value as it sees fit.  Don't forget this is
143   * measured in 32-bit words, not bytes.
144   */
145  u8 pci_dfl_cache_line_size __ro_after_init = L1_CACHE_BYTES >> 2;
146  u8 pci_cache_line_size __ro_after_init ;
147  
148  /*
149   * If we set up a device for bus mastering, we need to check the latency
150   * timer as certain BIOSes forget to set it properly.
151   */
152  unsigned int pcibios_max_latency = 255;
153  
154  /* If set, the PCIe ARI capability will not be used. */
155  static bool pcie_ari_disabled;
156  
157  /* If set, the PCIe ATS capability will not be used. */
158  static bool pcie_ats_disabled;
159  
160  /* If set, the PCI config space of each device is printed during boot. */
161  bool pci_early_dump;
162  
pci_ats_disabled(void)163  bool pci_ats_disabled(void)
164  {
165  	return pcie_ats_disabled;
166  }
167  EXPORT_SYMBOL_GPL(pci_ats_disabled);
168  
169  /* Disable bridge_d3 for all PCIe ports */
170  static bool pci_bridge_d3_disable;
171  /* Force bridge_d3 for all PCIe ports */
172  static bool pci_bridge_d3_force;
173  
pcie_port_pm_setup(char * str)174  static int __init pcie_port_pm_setup(char *str)
175  {
176  	if (!strcmp(str, "off"))
177  		pci_bridge_d3_disable = true;
178  	else if (!strcmp(str, "force"))
179  		pci_bridge_d3_force = true;
180  	return 1;
181  }
182  __setup("pcie_port_pm=", pcie_port_pm_setup);
183  
184  /**
185   * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
186   * @bus: pointer to PCI bus structure to search
187   *
188   * Given a PCI bus, returns the highest PCI bus number present in the set
189   * including the given PCI bus and its list of child PCI buses.
190   */
pci_bus_max_busnr(struct pci_bus * bus)191  unsigned char pci_bus_max_busnr(struct pci_bus *bus)
192  {
193  	struct pci_bus *tmp;
194  	unsigned char max, n;
195  
196  	max = bus->busn_res.end;
197  	list_for_each_entry(tmp, &bus->children, node) {
198  		n = pci_bus_max_busnr(tmp);
199  		if (n > max)
200  			max = n;
201  	}
202  	return max;
203  }
204  EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
205  
206  /**
207   * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
208   * @pdev: the PCI device
209   *
210   * Returns error bits set in PCI_STATUS and clears them.
211   */
pci_status_get_and_clear_errors(struct pci_dev * pdev)212  int pci_status_get_and_clear_errors(struct pci_dev *pdev)
213  {
214  	u16 status;
215  	int ret;
216  
217  	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
218  	if (ret != PCIBIOS_SUCCESSFUL)
219  		return -EIO;
220  
221  	status &= PCI_STATUS_ERROR_BITS;
222  	if (status)
223  		pci_write_config_word(pdev, PCI_STATUS, status);
224  
225  	return status;
226  }
227  EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
228  
229  #ifdef CONFIG_HAS_IOMEM
__pci_ioremap_resource(struct pci_dev * pdev,int bar,bool write_combine)230  static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
231  					    bool write_combine)
232  {
233  	struct resource *res = &pdev->resource[bar];
234  	resource_size_t start = res->start;
235  	resource_size_t size = resource_size(res);
236  
237  	/*
238  	 * Make sure the BAR is actually a memory resource, not an IO resource
239  	 */
240  	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
241  		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
242  		return NULL;
243  	}
244  
245  	if (write_combine)
246  		return ioremap_wc(start, size);
247  
248  	return ioremap(start, size);
249  }
250  
pci_ioremap_bar(struct pci_dev * pdev,int bar)251  void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
252  {
253  	return __pci_ioremap_resource(pdev, bar, false);
254  }
255  EXPORT_SYMBOL_GPL(pci_ioremap_bar);
256  
pci_ioremap_wc_bar(struct pci_dev * pdev,int bar)257  void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
258  {
259  	return __pci_ioremap_resource(pdev, bar, true);
260  }
261  EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
262  #endif
263  
264  /**
265   * pci_dev_str_match_path - test if a path string matches a device
266   * @dev: the PCI device to test
267   * @path: string to match the device against
268   * @endptr: pointer to the string after the match
269   *
270   * Test if a string (typically from a kernel parameter) formatted as a
271   * path of device/function addresses matches a PCI device. The string must
272   * be of the form:
273   *
274   *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
275   *
276   * A path for a device can be obtained using 'lspci -t'.  Using a path
277   * is more robust against bus renumbering than using only a single bus,
278   * device and function address.
279   *
280   * Returns 1 if the string matches the device, 0 if it does not and
281   * a negative error code if it fails to parse the string.
282   */
pci_dev_str_match_path(struct pci_dev * dev,const char * path,const char ** endptr)283  static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
284  				  const char **endptr)
285  {
286  	int ret;
287  	unsigned int seg, bus, slot, func;
288  	char *wpath, *p;
289  	char end;
290  
291  	*endptr = strchrnul(path, ';');
292  
293  	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
294  	if (!wpath)
295  		return -ENOMEM;
296  
297  	while (1) {
298  		p = strrchr(wpath, '/');
299  		if (!p)
300  			break;
301  		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
302  		if (ret != 2) {
303  			ret = -EINVAL;
304  			goto free_and_exit;
305  		}
306  
307  		if (dev->devfn != PCI_DEVFN(slot, func)) {
308  			ret = 0;
309  			goto free_and_exit;
310  		}
311  
312  		/*
313  		 * Note: we don't need to get a reference to the upstream
314  		 * bridge because we hold a reference to the top level
315  		 * device which should hold a reference to the bridge,
316  		 * and so on.
317  		 */
318  		dev = pci_upstream_bridge(dev);
319  		if (!dev) {
320  			ret = 0;
321  			goto free_and_exit;
322  		}
323  
324  		*p = 0;
325  	}
326  
327  	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
328  		     &func, &end);
329  	if (ret != 4) {
330  		seg = 0;
331  		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
332  		if (ret != 3) {
333  			ret = -EINVAL;
334  			goto free_and_exit;
335  		}
336  	}
337  
338  	ret = (seg == pci_domain_nr(dev->bus) &&
339  	       bus == dev->bus->number &&
340  	       dev->devfn == PCI_DEVFN(slot, func));
341  
342  free_and_exit:
343  	kfree(wpath);
344  	return ret;
345  }
346  
347  /**
348   * pci_dev_str_match - test if a string matches a device
349   * @dev: the PCI device to test
350   * @p: string to match the device against
351   * @endptr: pointer to the string after the match
352   *
353   * Test if a string (typically from a kernel parameter) matches a specified
354   * PCI device. The string may be of one of the following formats:
355   *
356   *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
357   *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
358   *
359   * The first format specifies a PCI bus/device/function address which
360   * may change if new hardware is inserted, if motherboard firmware changes,
361   * or due to changes caused in kernel parameters. If the domain is
362   * left unspecified, it is taken to be 0.  In order to be robust against
363   * bus renumbering issues, a path of PCI device/function numbers may be used
364   * to address the specific device.  The path for a device can be determined
365   * through the use of 'lspci -t'.
366   *
367   * The second format matches devices using IDs in the configuration
368   * space which may match multiple devices in the system. A value of 0
369   * for any field will match all devices. (Note: this differs from
370   * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
371   * legacy reasons and convenience so users don't have to specify
372   * FFFFFFFFs on the command line.)
373   *
374   * Returns 1 if the string matches the device, 0 if it does not and
375   * a negative error code if the string cannot be parsed.
376   */
pci_dev_str_match(struct pci_dev * dev,const char * p,const char ** endptr)377  static int pci_dev_str_match(struct pci_dev *dev, const char *p,
378  			     const char **endptr)
379  {
380  	int ret;
381  	int count;
382  	unsigned short vendor, device, subsystem_vendor, subsystem_device;
383  
384  	if (strncmp(p, "pci:", 4) == 0) {
385  		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
386  		p += 4;
387  		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
388  			     &subsystem_vendor, &subsystem_device, &count);
389  		if (ret != 4) {
390  			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
391  			if (ret != 2)
392  				return -EINVAL;
393  
394  			subsystem_vendor = 0;
395  			subsystem_device = 0;
396  		}
397  
398  		p += count;
399  
400  		if ((!vendor || vendor == dev->vendor) &&
401  		    (!device || device == dev->device) &&
402  		    (!subsystem_vendor ||
403  			    subsystem_vendor == dev->subsystem_vendor) &&
404  		    (!subsystem_device ||
405  			    subsystem_device == dev->subsystem_device))
406  			goto found;
407  	} else {
408  		/*
409  		 * PCI Bus, Device, Function IDs are specified
410  		 * (optionally, may include a path of devfns following it)
411  		 */
412  		ret = pci_dev_str_match_path(dev, p, &p);
413  		if (ret < 0)
414  			return ret;
415  		else if (ret)
416  			goto found;
417  	}
418  
419  	*endptr = p;
420  	return 0;
421  
422  found:
423  	*endptr = p;
424  	return 1;
425  }
426  
__pci_find_next_cap_ttl(struct pci_bus * bus,unsigned int devfn,u8 pos,int cap,int * ttl)427  static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
428  				  u8 pos, int cap, int *ttl)
429  {
430  	u8 id;
431  	u16 ent;
432  
433  	pci_bus_read_config_byte(bus, devfn, pos, &pos);
434  
435  	while ((*ttl)--) {
436  		if (pos < 0x40)
437  			break;
438  		pos &= ~3;
439  		pci_bus_read_config_word(bus, devfn, pos, &ent);
440  
441  		id = ent & 0xff;
442  		if (id == 0xff)
443  			break;
444  		if (id == cap)
445  			return pos;
446  		pos = (ent >> 8);
447  	}
448  	return 0;
449  }
450  
__pci_find_next_cap(struct pci_bus * bus,unsigned int devfn,u8 pos,int cap)451  static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
452  			      u8 pos, int cap)
453  {
454  	int ttl = PCI_FIND_CAP_TTL;
455  
456  	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
457  }
458  
pci_find_next_capability(struct pci_dev * dev,u8 pos,int cap)459  u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
460  {
461  	return __pci_find_next_cap(dev->bus, dev->devfn,
462  				   pos + PCI_CAP_LIST_NEXT, cap);
463  }
464  EXPORT_SYMBOL_GPL(pci_find_next_capability);
465  
__pci_bus_find_cap_start(struct pci_bus * bus,unsigned int devfn,u8 hdr_type)466  static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
467  				    unsigned int devfn, u8 hdr_type)
468  {
469  	u16 status;
470  
471  	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
472  	if (!(status & PCI_STATUS_CAP_LIST))
473  		return 0;
474  
475  	switch (hdr_type) {
476  	case PCI_HEADER_TYPE_NORMAL:
477  	case PCI_HEADER_TYPE_BRIDGE:
478  		return PCI_CAPABILITY_LIST;
479  	case PCI_HEADER_TYPE_CARDBUS:
480  		return PCI_CB_CAPABILITY_LIST;
481  	}
482  
483  	return 0;
484  }
485  
486  /**
487   * pci_find_capability - query for devices' capabilities
488   * @dev: PCI device to query
489   * @cap: capability code
490   *
491   * Tell if a device supports a given PCI capability.
492   * Returns the address of the requested capability structure within the
493   * device's PCI configuration space or 0 in case the device does not
494   * support it.  Possible values for @cap include:
495   *
496   *  %PCI_CAP_ID_PM           Power Management
497   *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
498   *  %PCI_CAP_ID_VPD          Vital Product Data
499   *  %PCI_CAP_ID_SLOTID       Slot Identification
500   *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
501   *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
502   *  %PCI_CAP_ID_PCIX         PCI-X
503   *  %PCI_CAP_ID_EXP          PCI Express
504   */
pci_find_capability(struct pci_dev * dev,int cap)505  u8 pci_find_capability(struct pci_dev *dev, int cap)
506  {
507  	u8 pos;
508  
509  	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
510  	if (pos)
511  		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
512  
513  	return pos;
514  }
515  EXPORT_SYMBOL(pci_find_capability);
516  
517  /**
518   * pci_bus_find_capability - query for devices' capabilities
519   * @bus: the PCI bus to query
520   * @devfn: PCI device to query
521   * @cap: capability code
522   *
523   * Like pci_find_capability() but works for PCI devices that do not have a
524   * pci_dev structure set up yet.
525   *
526   * Returns the address of the requested capability structure within the
527   * device's PCI configuration space or 0 in case the device does not
528   * support it.
529   */
pci_bus_find_capability(struct pci_bus * bus,unsigned int devfn,int cap)530  u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
531  {
532  	u8 hdr_type, pos;
533  
534  	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
535  
536  	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK);
537  	if (pos)
538  		pos = __pci_find_next_cap(bus, devfn, pos, cap);
539  
540  	return pos;
541  }
542  EXPORT_SYMBOL(pci_bus_find_capability);
543  
544  /**
545   * pci_find_next_ext_capability - Find an extended capability
546   * @dev: PCI device to query
547   * @start: address at which to start looking (0 to start at beginning of list)
548   * @cap: capability code
549   *
550   * Returns the address of the next matching extended capability structure
551   * within the device's PCI configuration space or 0 if the device does
552   * not support it.  Some capabilities can occur several times, e.g., the
553   * vendor-specific capability, and this provides a way to find them all.
554   */
pci_find_next_ext_capability(struct pci_dev * dev,u16 start,int cap)555  u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
556  {
557  	u32 header;
558  	int ttl;
559  	u16 pos = PCI_CFG_SPACE_SIZE;
560  
561  	/* minimum 8 bytes per capability */
562  	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
563  
564  	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
565  		return 0;
566  
567  	if (start)
568  		pos = start;
569  
570  	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
571  		return 0;
572  
573  	/*
574  	 * If we have no capabilities, this is indicated by cap ID,
575  	 * cap version and next pointer all being 0.
576  	 */
577  	if (header == 0)
578  		return 0;
579  
580  	while (ttl-- > 0) {
581  		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
582  			return pos;
583  
584  		pos = PCI_EXT_CAP_NEXT(header);
585  		if (pos < PCI_CFG_SPACE_SIZE)
586  			break;
587  
588  		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
589  			break;
590  	}
591  
592  	return 0;
593  }
594  EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
595  
596  /**
597   * pci_find_ext_capability - Find an extended capability
598   * @dev: PCI device to query
599   * @cap: capability code
600   *
601   * Returns the address of the requested extended capability structure
602   * within the device's PCI configuration space or 0 if the device does
603   * not support it.  Possible values for @cap include:
604   *
605   *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
606   *  %PCI_EXT_CAP_ID_VC		Virtual Channel
607   *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
608   *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
609   */
pci_find_ext_capability(struct pci_dev * dev,int cap)610  u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
611  {
612  	return pci_find_next_ext_capability(dev, 0, cap);
613  }
614  EXPORT_SYMBOL_GPL(pci_find_ext_capability);
615  
616  /**
617   * pci_get_dsn - Read and return the 8-byte Device Serial Number
618   * @dev: PCI device to query
619   *
620   * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
621   * Number.
622   *
623   * Returns the DSN, or zero if the capability does not exist.
624   */
pci_get_dsn(struct pci_dev * dev)625  u64 pci_get_dsn(struct pci_dev *dev)
626  {
627  	u32 dword;
628  	u64 dsn;
629  	int pos;
630  
631  	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
632  	if (!pos)
633  		return 0;
634  
635  	/*
636  	 * The Device Serial Number is two dwords offset 4 bytes from the
637  	 * capability position. The specification says that the first dword is
638  	 * the lower half, and the second dword is the upper half.
639  	 */
640  	pos += 4;
641  	pci_read_config_dword(dev, pos, &dword);
642  	dsn = (u64)dword;
643  	pci_read_config_dword(dev, pos + 4, &dword);
644  	dsn |= ((u64)dword) << 32;
645  
646  	return dsn;
647  }
648  EXPORT_SYMBOL_GPL(pci_get_dsn);
649  
__pci_find_next_ht_cap(struct pci_dev * dev,u8 pos,int ht_cap)650  static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
651  {
652  	int rc, ttl = PCI_FIND_CAP_TTL;
653  	u8 cap, mask;
654  
655  	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
656  		mask = HT_3BIT_CAP_MASK;
657  	else
658  		mask = HT_5BIT_CAP_MASK;
659  
660  	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
661  				      PCI_CAP_ID_HT, &ttl);
662  	while (pos) {
663  		rc = pci_read_config_byte(dev, pos + 3, &cap);
664  		if (rc != PCIBIOS_SUCCESSFUL)
665  			return 0;
666  
667  		if ((cap & mask) == ht_cap)
668  			return pos;
669  
670  		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
671  					      pos + PCI_CAP_LIST_NEXT,
672  					      PCI_CAP_ID_HT, &ttl);
673  	}
674  
675  	return 0;
676  }
677  
678  /**
679   * pci_find_next_ht_capability - query a device's HyperTransport capabilities
680   * @dev: PCI device to query
681   * @pos: Position from which to continue searching
682   * @ht_cap: HyperTransport capability code
683   *
684   * To be used in conjunction with pci_find_ht_capability() to search for
685   * all capabilities matching @ht_cap. @pos should always be a value returned
686   * from pci_find_ht_capability().
687   *
688   * NB. To be 100% safe against broken PCI devices, the caller should take
689   * steps to avoid an infinite loop.
690   */
pci_find_next_ht_capability(struct pci_dev * dev,u8 pos,int ht_cap)691  u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
692  {
693  	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
694  }
695  EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
696  
697  /**
698   * pci_find_ht_capability - query a device's HyperTransport capabilities
699   * @dev: PCI device to query
700   * @ht_cap: HyperTransport capability code
701   *
702   * Tell if a device supports a given HyperTransport capability.
703   * Returns an address within the device's PCI configuration space
704   * or 0 in case the device does not support the request capability.
705   * The address points to the PCI capability, of type PCI_CAP_ID_HT,
706   * which has a HyperTransport capability matching @ht_cap.
707   */
pci_find_ht_capability(struct pci_dev * dev,int ht_cap)708  u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
709  {
710  	u8 pos;
711  
712  	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
713  	if (pos)
714  		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
715  
716  	return pos;
717  }
718  EXPORT_SYMBOL_GPL(pci_find_ht_capability);
719  
720  /**
721   * pci_find_vsec_capability - Find a vendor-specific extended capability
722   * @dev: PCI device to query
723   * @vendor: Vendor ID for which capability is defined
724   * @cap: Vendor-specific capability ID
725   *
726   * If @dev has Vendor ID @vendor, search for a VSEC capability with
727   * VSEC ID @cap. If found, return the capability offset in
728   * config space; otherwise return 0.
729   */
pci_find_vsec_capability(struct pci_dev * dev,u16 vendor,int cap)730  u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
731  {
732  	u16 vsec = 0;
733  	u32 header;
734  	int ret;
735  
736  	if (vendor != dev->vendor)
737  		return 0;
738  
739  	while ((vsec = pci_find_next_ext_capability(dev, vsec,
740  						     PCI_EXT_CAP_ID_VNDR))) {
741  		ret = pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
742  		if (ret != PCIBIOS_SUCCESSFUL)
743  			continue;
744  
745  		if (PCI_VNDR_HEADER_ID(header) == cap)
746  			return vsec;
747  	}
748  
749  	return 0;
750  }
751  EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
752  
753  /**
754   * pci_find_dvsec_capability - Find DVSEC for vendor
755   * @dev: PCI device to query
756   * @vendor: Vendor ID to match for the DVSEC
757   * @dvsec: Designated Vendor-specific capability ID
758   *
759   * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
760   * offset in config space; otherwise return 0.
761   */
pci_find_dvsec_capability(struct pci_dev * dev,u16 vendor,u16 dvsec)762  u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
763  {
764  	int pos;
765  
766  	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
767  	if (!pos)
768  		return 0;
769  
770  	while (pos) {
771  		u16 v, id;
772  
773  		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
774  		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
775  		if (vendor == v && dvsec == id)
776  			return pos;
777  
778  		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
779  	}
780  
781  	return 0;
782  }
783  EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
784  
785  /**
786   * pci_find_parent_resource - return resource region of parent bus of given
787   *			      region
788   * @dev: PCI device structure contains resources to be searched
789   * @res: child resource record for which parent is sought
790   *
791   * For given resource region of given device, return the resource region of
792   * parent bus the given region is contained in.
793   */
pci_find_parent_resource(const struct pci_dev * dev,struct resource * res)794  struct resource *pci_find_parent_resource(const struct pci_dev *dev,
795  					  struct resource *res)
796  {
797  	const struct pci_bus *bus = dev->bus;
798  	struct resource *r;
799  
800  	pci_bus_for_each_resource(bus, r) {
801  		if (!r)
802  			continue;
803  		if (resource_contains(r, res)) {
804  
805  			/*
806  			 * If the window is prefetchable but the BAR is
807  			 * not, the allocator made a mistake.
808  			 */
809  			if (r->flags & IORESOURCE_PREFETCH &&
810  			    !(res->flags & IORESOURCE_PREFETCH))
811  				return NULL;
812  
813  			/*
814  			 * If we're below a transparent bridge, there may
815  			 * be both a positively-decoded aperture and a
816  			 * subtractively-decoded region that contain the BAR.
817  			 * We want the positively-decoded one, so this depends
818  			 * on pci_bus_for_each_resource() giving us those
819  			 * first.
820  			 */
821  			return r;
822  		}
823  	}
824  	return NULL;
825  }
826  EXPORT_SYMBOL(pci_find_parent_resource);
827  
828  /**
829   * pci_find_resource - Return matching PCI device resource
830   * @dev: PCI device to query
831   * @res: Resource to look for
832   *
833   * Goes over standard PCI resources (BARs) and checks if the given resource
834   * is partially or fully contained in any of them. In that case the
835   * matching resource is returned, %NULL otherwise.
836   */
pci_find_resource(struct pci_dev * dev,struct resource * res)837  struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
838  {
839  	int i;
840  
841  	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
842  		struct resource *r = &dev->resource[i];
843  
844  		if (r->start && resource_contains(r, res))
845  			return r;
846  	}
847  
848  	return NULL;
849  }
850  EXPORT_SYMBOL(pci_find_resource);
851  
852  /**
853   * pci_resource_name - Return the name of the PCI resource
854   * @dev: PCI device to query
855   * @i: index of the resource
856   *
857   * Return the standard PCI resource (BAR) name according to their index.
858   */
pci_resource_name(struct pci_dev * dev,unsigned int i)859  const char *pci_resource_name(struct pci_dev *dev, unsigned int i)
860  {
861  	static const char * const bar_name[] = {
862  		"BAR 0",
863  		"BAR 1",
864  		"BAR 2",
865  		"BAR 3",
866  		"BAR 4",
867  		"BAR 5",
868  		"ROM",
869  #ifdef CONFIG_PCI_IOV
870  		"VF BAR 0",
871  		"VF BAR 1",
872  		"VF BAR 2",
873  		"VF BAR 3",
874  		"VF BAR 4",
875  		"VF BAR 5",
876  #endif
877  		"bridge window",	/* "io" included in %pR */
878  		"bridge window",	/* "mem" included in %pR */
879  		"bridge window",	/* "mem pref" included in %pR */
880  	};
881  	static const char * const cardbus_name[] = {
882  		"BAR 1",
883  		"unknown",
884  		"unknown",
885  		"unknown",
886  		"unknown",
887  		"unknown",
888  #ifdef CONFIG_PCI_IOV
889  		"unknown",
890  		"unknown",
891  		"unknown",
892  		"unknown",
893  		"unknown",
894  		"unknown",
895  #endif
896  		"CardBus bridge window 0",	/* I/O */
897  		"CardBus bridge window 1",	/* I/O */
898  		"CardBus bridge window 0",	/* mem */
899  		"CardBus bridge window 1",	/* mem */
900  	};
901  
902  	if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS &&
903  	    i < ARRAY_SIZE(cardbus_name))
904  		return cardbus_name[i];
905  
906  	if (i < ARRAY_SIZE(bar_name))
907  		return bar_name[i];
908  
909  	return "unknown";
910  }
911  
912  /**
913   * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
914   * @dev: the PCI device to operate on
915   * @pos: config space offset of status word
916   * @mask: mask of bit(s) to care about in status word
917   *
918   * Return 1 when mask bit(s) in status word clear, 0 otherwise.
919   */
pci_wait_for_pending(struct pci_dev * dev,int pos,u16 mask)920  int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
921  {
922  	int i;
923  
924  	/* Wait for Transaction Pending bit clean */
925  	for (i = 0; i < 4; i++) {
926  		u16 status;
927  		if (i)
928  			msleep((1 << (i - 1)) * 100);
929  
930  		pci_read_config_word(dev, pos, &status);
931  		if (!(status & mask))
932  			return 1;
933  	}
934  
935  	return 0;
936  }
937  
938  static int pci_acs_enable;
939  
940  /**
941   * pci_request_acs - ask for ACS to be enabled if supported
942   */
pci_request_acs(void)943  void pci_request_acs(void)
944  {
945  	pci_acs_enable = 1;
946  }
947  
948  static const char *disable_acs_redir_param;
949  static const char *config_acs_param;
950  
951  struct pci_acs {
952  	u16 cap;
953  	u16 ctrl;
954  	u16 fw_ctrl;
955  };
956  
__pci_config_acs(struct pci_dev * dev,struct pci_acs * caps,const char * p,u16 mask,u16 flags)957  static void __pci_config_acs(struct pci_dev *dev, struct pci_acs *caps,
958  			     const char *p, u16 mask, u16 flags)
959  {
960  	char *delimit;
961  	int ret = 0;
962  
963  	if (!p)
964  		return;
965  
966  	while (*p) {
967  		if (!mask) {
968  			/* Check for ACS flags */
969  			delimit = strstr(p, "@");
970  			if (delimit) {
971  				int end;
972  				u32 shift = 0;
973  
974  				end = delimit - p - 1;
975  
976  				while (end > -1) {
977  					if (*(p + end) == '0') {
978  						mask |= 1 << shift;
979  						shift++;
980  						end--;
981  					} else if (*(p + end) == '1') {
982  						mask |= 1 << shift;
983  						flags |= 1 << shift;
984  						shift++;
985  						end--;
986  					} else if ((*(p + end) == 'x') || (*(p + end) == 'X')) {
987  						shift++;
988  						end--;
989  					} else {
990  						pci_err(dev, "Invalid ACS flags... Ignoring\n");
991  						return;
992  					}
993  				}
994  				p = delimit + 1;
995  			} else {
996  				pci_err(dev, "ACS Flags missing\n");
997  				return;
998  			}
999  		}
1000  
1001  		if (mask & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR |
1002  			    PCI_ACS_UF | PCI_ACS_EC | PCI_ACS_DT)) {
1003  			pci_err(dev, "Invalid ACS flags specified\n");
1004  			return;
1005  		}
1006  
1007  		ret = pci_dev_str_match(dev, p, &p);
1008  		if (ret < 0) {
1009  			pr_info_once("PCI: Can't parse ACS command line parameter\n");
1010  			break;
1011  		} else if (ret == 1) {
1012  			/* Found a match */
1013  			break;
1014  		}
1015  
1016  		if (*p != ';' && *p != ',') {
1017  			/* End of param or invalid format */
1018  			break;
1019  		}
1020  		p++;
1021  	}
1022  
1023  	if (ret != 1)
1024  		return;
1025  
1026  	if (!pci_dev_specific_disable_acs_redir(dev))
1027  		return;
1028  
1029  	pci_dbg(dev, "ACS mask  = %#06x\n", mask);
1030  	pci_dbg(dev, "ACS flags = %#06x\n", flags);
1031  
1032  	/* If mask is 0 then we copy the bit from the firmware setting. */
1033  	caps->ctrl = (caps->ctrl & ~mask) | (caps->fw_ctrl & mask);
1034  	caps->ctrl |= flags;
1035  
1036  	pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl);
1037  }
1038  
1039  /**
1040   * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
1041   * @dev: the PCI device
1042   * @caps: default ACS controls
1043   */
pci_std_enable_acs(struct pci_dev * dev,struct pci_acs * caps)1044  static void pci_std_enable_acs(struct pci_dev *dev, struct pci_acs *caps)
1045  {
1046  	/* Source Validation */
1047  	caps->ctrl |= (caps->cap & PCI_ACS_SV);
1048  
1049  	/* P2P Request Redirect */
1050  	caps->ctrl |= (caps->cap & PCI_ACS_RR);
1051  
1052  	/* P2P Completion Redirect */
1053  	caps->ctrl |= (caps->cap & PCI_ACS_CR);
1054  
1055  	/* Upstream Forwarding */
1056  	caps->ctrl |= (caps->cap & PCI_ACS_UF);
1057  
1058  	/* Enable Translation Blocking for external devices and noats */
1059  	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
1060  		caps->ctrl |= (caps->cap & PCI_ACS_TB);
1061  }
1062  
1063  /**
1064   * pci_enable_acs - enable ACS if hardware support it
1065   * @dev: the PCI device
1066   */
pci_enable_acs(struct pci_dev * dev)1067  static void pci_enable_acs(struct pci_dev *dev)
1068  {
1069  	struct pci_acs caps;
1070  	bool enable_acs = false;
1071  	int pos;
1072  
1073  	/* If an iommu is present we start with kernel default caps */
1074  	if (pci_acs_enable) {
1075  		if (pci_dev_specific_enable_acs(dev))
1076  			enable_acs = true;
1077  	}
1078  
1079  	pos = dev->acs_cap;
1080  	if (!pos)
1081  		return;
1082  
1083  	pci_read_config_word(dev, pos + PCI_ACS_CAP, &caps.cap);
1084  	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &caps.ctrl);
1085  	caps.fw_ctrl = caps.ctrl;
1086  
1087  	if (enable_acs)
1088  		pci_std_enable_acs(dev, &caps);
1089  
1090  	/*
1091  	 * Always apply caps from the command line, even if there is no iommu.
1092  	 * Trust that the admin has a reason to change the ACS settings.
1093  	 */
1094  	__pci_config_acs(dev, &caps, disable_acs_redir_param,
1095  			 PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC,
1096  			 ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC));
1097  	__pci_config_acs(dev, &caps, config_acs_param, 0, 0);
1098  
1099  	pci_write_config_word(dev, pos + PCI_ACS_CTRL, caps.ctrl);
1100  }
1101  
1102  /**
1103   * pcie_read_tlp_log - read TLP Header Log
1104   * @dev: PCIe device
1105   * @where: PCI Config offset of TLP Header Log
1106   * @tlp_log: TLP Log structure to fill
1107   *
1108   * Fill @tlp_log from TLP Header Log registers, e.g., AER or DPC.
1109   *
1110   * Return: 0 on success and filled TLP Log structure, <0 on error.
1111   */
pcie_read_tlp_log(struct pci_dev * dev,int where,struct pcie_tlp_log * tlp_log)1112  int pcie_read_tlp_log(struct pci_dev *dev, int where,
1113  		      struct pcie_tlp_log *tlp_log)
1114  {
1115  	int i, ret;
1116  
1117  	memset(tlp_log, 0, sizeof(*tlp_log));
1118  
1119  	for (i = 0; i < 4; i++) {
1120  		ret = pci_read_config_dword(dev, where + i * 4,
1121  					    &tlp_log->dw[i]);
1122  		if (ret)
1123  			return pcibios_err_to_errno(ret);
1124  	}
1125  
1126  	return 0;
1127  }
1128  EXPORT_SYMBOL_GPL(pcie_read_tlp_log);
1129  
1130  /**
1131   * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
1132   * @dev: PCI device to have its BARs restored
1133   *
1134   * Restore the BAR values for a given device, so as to make it
1135   * accessible by its driver.
1136   */
pci_restore_bars(struct pci_dev * dev)1137  static void pci_restore_bars(struct pci_dev *dev)
1138  {
1139  	int i;
1140  
1141  	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
1142  		pci_update_resource(dev, i);
1143  }
1144  
platform_pci_power_manageable(struct pci_dev * dev)1145  static inline bool platform_pci_power_manageable(struct pci_dev *dev)
1146  {
1147  	if (pci_use_mid_pm())
1148  		return true;
1149  
1150  	return acpi_pci_power_manageable(dev);
1151  }
1152  
platform_pci_set_power_state(struct pci_dev * dev,pci_power_t t)1153  static inline int platform_pci_set_power_state(struct pci_dev *dev,
1154  					       pci_power_t t)
1155  {
1156  	if (pci_use_mid_pm())
1157  		return mid_pci_set_power_state(dev, t);
1158  
1159  	return acpi_pci_set_power_state(dev, t);
1160  }
1161  
platform_pci_get_power_state(struct pci_dev * dev)1162  static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
1163  {
1164  	if (pci_use_mid_pm())
1165  		return mid_pci_get_power_state(dev);
1166  
1167  	return acpi_pci_get_power_state(dev);
1168  }
1169  
platform_pci_refresh_power_state(struct pci_dev * dev)1170  static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
1171  {
1172  	if (!pci_use_mid_pm())
1173  		acpi_pci_refresh_power_state(dev);
1174  }
1175  
platform_pci_choose_state(struct pci_dev * dev)1176  static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
1177  {
1178  	if (pci_use_mid_pm())
1179  		return PCI_POWER_ERROR;
1180  
1181  	return acpi_pci_choose_state(dev);
1182  }
1183  
platform_pci_set_wakeup(struct pci_dev * dev,bool enable)1184  static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1185  {
1186  	if (pci_use_mid_pm())
1187  		return PCI_POWER_ERROR;
1188  
1189  	return acpi_pci_wakeup(dev, enable);
1190  }
1191  
platform_pci_need_resume(struct pci_dev * dev)1192  static inline bool platform_pci_need_resume(struct pci_dev *dev)
1193  {
1194  	if (pci_use_mid_pm())
1195  		return false;
1196  
1197  	return acpi_pci_need_resume(dev);
1198  }
1199  
platform_pci_bridge_d3(struct pci_dev * dev)1200  static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1201  {
1202  	if (pci_use_mid_pm())
1203  		return false;
1204  
1205  	return acpi_pci_bridge_d3(dev);
1206  }
1207  
1208  /**
1209   * pci_update_current_state - Read power state of given device and cache it
1210   * @dev: PCI device to handle.
1211   * @state: State to cache in case the device doesn't have the PM capability
1212   *
1213   * The power state is read from the PMCSR register, which however is
1214   * inaccessible in D3cold.  The platform firmware is therefore queried first
1215   * to detect accessibility of the register.  In case the platform firmware
1216   * reports an incorrect state or the device isn't power manageable by the
1217   * platform at all, we try to detect D3cold by testing accessibility of the
1218   * vendor ID in config space.
1219   */
pci_update_current_state(struct pci_dev * dev,pci_power_t state)1220  void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1221  {
1222  	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
1223  		dev->current_state = PCI_D3cold;
1224  	} else if (dev->pm_cap) {
1225  		u16 pmcsr;
1226  
1227  		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1228  		if (PCI_POSSIBLE_ERROR(pmcsr)) {
1229  			dev->current_state = PCI_D3cold;
1230  			return;
1231  		}
1232  		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1233  	} else {
1234  		dev->current_state = state;
1235  	}
1236  }
1237  
1238  /**
1239   * pci_refresh_power_state - Refresh the given device's power state data
1240   * @dev: Target PCI device.
1241   *
1242   * Ask the platform to refresh the devices power state information and invoke
1243   * pci_update_current_state() to update its current PCI power state.
1244   */
pci_refresh_power_state(struct pci_dev * dev)1245  void pci_refresh_power_state(struct pci_dev *dev)
1246  {
1247  	platform_pci_refresh_power_state(dev);
1248  	pci_update_current_state(dev, dev->current_state);
1249  }
1250  
1251  /**
1252   * pci_platform_power_transition - Use platform to change device power state
1253   * @dev: PCI device to handle.
1254   * @state: State to put the device into.
1255   */
pci_platform_power_transition(struct pci_dev * dev,pci_power_t state)1256  int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1257  {
1258  	int error;
1259  
1260  	error = platform_pci_set_power_state(dev, state);
1261  	if (!error)
1262  		pci_update_current_state(dev, state);
1263  	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
1264  		dev->current_state = PCI_D0;
1265  
1266  	return error;
1267  }
1268  EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1269  
pci_resume_one(struct pci_dev * pci_dev,void * ign)1270  static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1271  {
1272  	pm_request_resume(&pci_dev->dev);
1273  	return 0;
1274  }
1275  
1276  /**
1277   * pci_resume_bus - Walk given bus and runtime resume devices on it
1278   * @bus: Top bus of the subtree to walk.
1279   */
pci_resume_bus(struct pci_bus * bus)1280  void pci_resume_bus(struct pci_bus *bus)
1281  {
1282  	if (bus)
1283  		pci_walk_bus(bus, pci_resume_one, NULL);
1284  }
1285  
pci_dev_wait(struct pci_dev * dev,char * reset_type,int timeout)1286  static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1287  {
1288  	int delay = 1;
1289  	bool retrain = false;
1290  	struct pci_dev *root, *bridge;
1291  
1292  	root = pcie_find_root_port(dev);
1293  
1294  	if (pci_is_pcie(dev)) {
1295  		bridge = pci_upstream_bridge(dev);
1296  		if (bridge)
1297  			retrain = true;
1298  	}
1299  
1300  	/*
1301  	 * The caller has already waited long enough after a reset that the
1302  	 * device should respond to config requests, but it may respond
1303  	 * with Request Retry Status (RRS) if it needs more time to
1304  	 * initialize.
1305  	 *
1306  	 * If the device is below a Root Port with Configuration RRS
1307  	 * Software Visibility enabled, reading the Vendor ID returns a
1308  	 * special data value if the device responded with RRS.  Read the
1309  	 * Vendor ID until we get non-RRS status.
1310  	 *
1311  	 * If there's no Root Port or Configuration RRS Software Visibility
1312  	 * is not enabled, the device may still respond with RRS, but
1313  	 * hardware may retry the config request.  If no retries receive
1314  	 * Successful Completion, hardware generally synthesizes ~0
1315  	 * (PCI_ERROR_RESPONSE) data to complete the read.  Reading Vendor
1316  	 * ID for VFs and non-existent devices also returns ~0, so read the
1317  	 * Command register until it returns something other than ~0.
1318  	 */
1319  	for (;;) {
1320  		u32 id;
1321  
1322  		if (pci_dev_is_disconnected(dev)) {
1323  			pci_dbg(dev, "disconnected; not waiting\n");
1324  			return -ENOTTY;
1325  		}
1326  
1327  		if (root && root->config_rrs_sv) {
1328  			pci_read_config_dword(dev, PCI_VENDOR_ID, &id);
1329  			if (!pci_bus_rrs_vendor_id(id))
1330  				break;
1331  		} else {
1332  			pci_read_config_dword(dev, PCI_COMMAND, &id);
1333  			if (!PCI_POSSIBLE_ERROR(id))
1334  				break;
1335  		}
1336  
1337  		if (delay > timeout) {
1338  			pci_warn(dev, "not ready %dms after %s; giving up\n",
1339  				 delay - 1, reset_type);
1340  			return -ENOTTY;
1341  		}
1342  
1343  		if (delay > PCI_RESET_WAIT) {
1344  			if (retrain) {
1345  				retrain = false;
1346  				if (pcie_failed_link_retrain(bridge) == 0) {
1347  					delay = 1;
1348  					continue;
1349  				}
1350  			}
1351  			pci_info(dev, "not ready %dms after %s; waiting\n",
1352  				 delay - 1, reset_type);
1353  		}
1354  
1355  		msleep(delay);
1356  		delay *= 2;
1357  	}
1358  
1359  	if (delay > PCI_RESET_WAIT)
1360  		pci_info(dev, "ready %dms after %s\n", delay - 1,
1361  			 reset_type);
1362  	else
1363  		pci_dbg(dev, "ready %dms after %s\n", delay - 1,
1364  			reset_type);
1365  
1366  	return 0;
1367  }
1368  
1369  /**
1370   * pci_power_up - Put the given device into D0
1371   * @dev: PCI device to power up
1372   *
1373   * On success, return 0 or 1, depending on whether or not it is necessary to
1374   * restore the device's BARs subsequently (1 is returned in that case).
1375   *
1376   * On failure, return a negative error code.  Always return failure if @dev
1377   * lacks a Power Management Capability, even if the platform was able to
1378   * put the device in D0 via non-PCI means.
1379   */
pci_power_up(struct pci_dev * dev)1380  int pci_power_up(struct pci_dev *dev)
1381  {
1382  	bool need_restore;
1383  	pci_power_t state;
1384  	u16 pmcsr;
1385  
1386  	platform_pci_set_power_state(dev, PCI_D0);
1387  
1388  	if (!dev->pm_cap) {
1389  		state = platform_pci_get_power_state(dev);
1390  		if (state == PCI_UNKNOWN)
1391  			dev->current_state = PCI_D0;
1392  		else
1393  			dev->current_state = state;
1394  
1395  		return -EIO;
1396  	}
1397  
1398  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1399  	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1400  		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1401  			pci_power_name(dev->current_state));
1402  		dev->current_state = PCI_D3cold;
1403  		return -EIO;
1404  	}
1405  
1406  	state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1407  
1408  	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1409  			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1410  
1411  	if (state == PCI_D0)
1412  		goto end;
1413  
1414  	/*
1415  	 * Force the entire word to 0. This doesn't affect PME_Status, disables
1416  	 * PME_En, and sets PowerState to 0.
1417  	 */
1418  	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1419  
1420  	/* Mandatory transition delays; see PCI PM 1.2. */
1421  	if (state == PCI_D3hot)
1422  		pci_dev_d3_sleep(dev);
1423  	else if (state == PCI_D2)
1424  		udelay(PCI_PM_D2_DELAY);
1425  
1426  end:
1427  	dev->current_state = PCI_D0;
1428  	if (need_restore)
1429  		return 1;
1430  
1431  	return 0;
1432  }
1433  
1434  /**
1435   * pci_set_full_power_state - Put a PCI device into D0 and update its state
1436   * @dev: PCI device to power up
1437   * @locked: whether pci_bus_sem is held
1438   *
1439   * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
1440   * to confirm the state change, restore its BARs if they might be lost and
1441   * reconfigure ASPM in accordance with the new power state.
1442   *
1443   * If pci_restore_state() is going to be called right after a power state change
1444   * to D0, it is more efficient to use pci_power_up() directly instead of this
1445   * function.
1446   */
pci_set_full_power_state(struct pci_dev * dev,bool locked)1447  static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
1448  {
1449  	u16 pmcsr;
1450  	int ret;
1451  
1452  	ret = pci_power_up(dev);
1453  	if (ret < 0) {
1454  		if (dev->current_state == PCI_D0)
1455  			return 0;
1456  
1457  		return ret;
1458  	}
1459  
1460  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1461  	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1462  	if (dev->current_state != PCI_D0) {
1463  		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1464  				     pci_power_name(dev->current_state));
1465  	} else if (ret > 0) {
1466  		/*
1467  		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1468  		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1469  		 * from D3hot to D0 _may_ perform an internal reset, thereby
1470  		 * going to "D0 Uninitialized" rather than "D0 Initialized".
1471  		 * For example, at least some versions of the 3c905B and the
1472  		 * 3c556B exhibit this behaviour.
1473  		 *
1474  		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1475  		 * devices in a D3hot state at boot.  Consequently, we need to
1476  		 * restore at least the BARs so that the device will be
1477  		 * accessible to its driver.
1478  		 */
1479  		pci_restore_bars(dev);
1480  	}
1481  
1482  	if (dev->bus->self)
1483  		pcie_aspm_pm_state_change(dev->bus->self, locked);
1484  
1485  	return 0;
1486  }
1487  
1488  /**
1489   * __pci_dev_set_current_state - Set current state of a PCI device
1490   * @dev: Device to handle
1491   * @data: pointer to state to be set
1492   */
__pci_dev_set_current_state(struct pci_dev * dev,void * data)1493  static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1494  {
1495  	pci_power_t state = *(pci_power_t *)data;
1496  
1497  	dev->current_state = state;
1498  	return 0;
1499  }
1500  
1501  /**
1502   * pci_bus_set_current_state - Walk given bus and set current state of devices
1503   * @bus: Top bus of the subtree to walk.
1504   * @state: state to be set
1505   */
pci_bus_set_current_state(struct pci_bus * bus,pci_power_t state)1506  void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1507  {
1508  	if (bus)
1509  		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1510  }
1511  
__pci_bus_set_current_state(struct pci_bus * bus,pci_power_t state,bool locked)1512  static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
1513  {
1514  	if (!bus)
1515  		return;
1516  
1517  	if (locked)
1518  		pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
1519  	else
1520  		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1521  }
1522  
1523  /**
1524   * pci_set_low_power_state - Put a PCI device into a low-power state.
1525   * @dev: PCI device to handle.
1526   * @state: PCI power state (D1, D2, D3hot) to put the device into.
1527   * @locked: whether pci_bus_sem is held
1528   *
1529   * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1530   *
1531   * RETURN VALUE:
1532   * -EINVAL if the requested state is invalid.
1533   * -EIO if device does not support PCI PM or its PM capabilities register has a
1534   * wrong version, or device doesn't support the requested state.
1535   * 0 if device already is in the requested state.
1536   * 0 if device's power state has been successfully changed.
1537   */
pci_set_low_power_state(struct pci_dev * dev,pci_power_t state,bool locked)1538  static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1539  {
1540  	u16 pmcsr;
1541  
1542  	if (!dev->pm_cap)
1543  		return -EIO;
1544  
1545  	/*
1546  	 * Validate transition: We can enter D0 from any state, but if
1547  	 * we're already in a low-power state, we can only go deeper.  E.g.,
1548  	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1549  	 * we'd have to go from D3 to D0, then to D1.
1550  	 */
1551  	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1552  		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1553  			pci_power_name(dev->current_state),
1554  			pci_power_name(state));
1555  		return -EINVAL;
1556  	}
1557  
1558  	/* Check if this device supports the desired state */
1559  	if ((state == PCI_D1 && !dev->d1_support)
1560  	   || (state == PCI_D2 && !dev->d2_support))
1561  		return -EIO;
1562  
1563  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1564  	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1565  		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1566  			pci_power_name(dev->current_state),
1567  			pci_power_name(state));
1568  		dev->current_state = PCI_D3cold;
1569  		return -EIO;
1570  	}
1571  
1572  	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1573  	pmcsr |= state;
1574  
1575  	/* Enter specified state */
1576  	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1577  
1578  	/* Mandatory power management transition delays; see PCI PM 1.2. */
1579  	if (state == PCI_D3hot)
1580  		pci_dev_d3_sleep(dev);
1581  	else if (state == PCI_D2)
1582  		udelay(PCI_PM_D2_DELAY);
1583  
1584  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1585  	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1586  	if (dev->current_state != state)
1587  		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1588  				     pci_power_name(dev->current_state),
1589  				     pci_power_name(state));
1590  
1591  	if (dev->bus->self)
1592  		pcie_aspm_pm_state_change(dev->bus->self, locked);
1593  
1594  	return 0;
1595  }
1596  
__pci_set_power_state(struct pci_dev * dev,pci_power_t state,bool locked)1597  static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
1598  {
1599  	int error;
1600  
1601  	/* Bound the state we're entering */
1602  	if (state > PCI_D3cold)
1603  		state = PCI_D3cold;
1604  	else if (state < PCI_D0)
1605  		state = PCI_D0;
1606  	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1607  
1608  		/*
1609  		 * If the device or the parent bridge do not support PCI
1610  		 * PM, ignore the request if we're doing anything other
1611  		 * than putting it into D0 (which would only happen on
1612  		 * boot).
1613  		 */
1614  		return 0;
1615  
1616  	/* Check if we're already there */
1617  	if (dev->current_state == state)
1618  		return 0;
1619  
1620  	if (state == PCI_D0)
1621  		return pci_set_full_power_state(dev, locked);
1622  
1623  	/*
1624  	 * This device is quirked not to be put into D3, so don't put it in
1625  	 * D3
1626  	 */
1627  	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1628  		return 0;
1629  
1630  	if (state == PCI_D3cold) {
1631  		/*
1632  		 * To put the device in D3cold, put it into D3hot in the native
1633  		 * way, then put it into D3cold using platform ops.
1634  		 */
1635  		error = pci_set_low_power_state(dev, PCI_D3hot, locked);
1636  
1637  		if (pci_platform_power_transition(dev, PCI_D3cold))
1638  			return error;
1639  
1640  		/* Powering off a bridge may power off the whole hierarchy */
1641  		if (dev->current_state == PCI_D3cold)
1642  			__pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
1643  	} else {
1644  		error = pci_set_low_power_state(dev, state, locked);
1645  
1646  		if (pci_platform_power_transition(dev, state))
1647  			return error;
1648  	}
1649  
1650  	return 0;
1651  }
1652  
1653  /**
1654   * pci_set_power_state - Set the power state of a PCI device
1655   * @dev: PCI device to handle.
1656   * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1657   *
1658   * Transition a device to a new power state, using the platform firmware and/or
1659   * the device's PCI PM registers.
1660   *
1661   * RETURN VALUE:
1662   * -EINVAL if the requested state is invalid.
1663   * -EIO if device does not support PCI PM or its PM capabilities register has a
1664   * wrong version, or device doesn't support the requested state.
1665   * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1666   * 0 if device already is in the requested state.
1667   * 0 if the transition is to D3 but D3 is not supported.
1668   * 0 if device's power state has been successfully changed.
1669   */
pci_set_power_state(struct pci_dev * dev,pci_power_t state)1670  int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1671  {
1672  	return __pci_set_power_state(dev, state, false);
1673  }
1674  EXPORT_SYMBOL(pci_set_power_state);
1675  
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)1676  int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
1677  {
1678  	lockdep_assert_held(&pci_bus_sem);
1679  
1680  	return __pci_set_power_state(dev, state, true);
1681  }
1682  EXPORT_SYMBOL(pci_set_power_state_locked);
1683  
1684  #define PCI_EXP_SAVE_REGS	7
1685  
_pci_find_saved_cap(struct pci_dev * pci_dev,u16 cap,bool extended)1686  static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1687  						       u16 cap, bool extended)
1688  {
1689  	struct pci_cap_saved_state *tmp;
1690  
1691  	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1692  		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1693  			return tmp;
1694  	}
1695  	return NULL;
1696  }
1697  
pci_find_saved_cap(struct pci_dev * dev,char cap)1698  struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1699  {
1700  	return _pci_find_saved_cap(dev, cap, false);
1701  }
1702  
pci_find_saved_ext_cap(struct pci_dev * dev,u16 cap)1703  struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1704  {
1705  	return _pci_find_saved_cap(dev, cap, true);
1706  }
1707  
pci_save_pcie_state(struct pci_dev * dev)1708  static int pci_save_pcie_state(struct pci_dev *dev)
1709  {
1710  	int i = 0;
1711  	struct pci_cap_saved_state *save_state;
1712  	u16 *cap;
1713  
1714  	if (!pci_is_pcie(dev))
1715  		return 0;
1716  
1717  	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1718  	if (!save_state) {
1719  		pci_err(dev, "buffer not found in %s\n", __func__);
1720  		return -ENOMEM;
1721  	}
1722  
1723  	cap = (u16 *)&save_state->cap.data[0];
1724  	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1725  	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1726  	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1727  	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1728  	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1729  	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1730  	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1731  
1732  	pci_save_aspm_l1ss_state(dev);
1733  	pci_save_ltr_state(dev);
1734  
1735  	return 0;
1736  }
1737  
pci_restore_pcie_state(struct pci_dev * dev)1738  static void pci_restore_pcie_state(struct pci_dev *dev)
1739  {
1740  	int i = 0;
1741  	struct pci_cap_saved_state *save_state;
1742  	u16 *cap;
1743  
1744  	/*
1745  	 * Restore max latencies (in the LTR capability) before enabling
1746  	 * LTR itself in PCI_EXP_DEVCTL2.
1747  	 */
1748  	pci_restore_ltr_state(dev);
1749  	pci_restore_aspm_l1ss_state(dev);
1750  
1751  	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1752  	if (!save_state)
1753  		return;
1754  
1755  	/*
1756  	 * Downstream ports reset the LTR enable bit when link goes down.
1757  	 * Check and re-configure the bit here before restoring device.
1758  	 * PCIe r5.0, sec 7.5.3.16.
1759  	 */
1760  	pci_bridge_reconfigure_ltr(dev);
1761  
1762  	cap = (u16 *)&save_state->cap.data[0];
1763  	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1764  	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1765  	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1766  	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1767  	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1768  	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1769  	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1770  }
1771  
pci_save_pcix_state(struct pci_dev * dev)1772  static int pci_save_pcix_state(struct pci_dev *dev)
1773  {
1774  	int pos;
1775  	struct pci_cap_saved_state *save_state;
1776  
1777  	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1778  	if (!pos)
1779  		return 0;
1780  
1781  	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1782  	if (!save_state) {
1783  		pci_err(dev, "buffer not found in %s\n", __func__);
1784  		return -ENOMEM;
1785  	}
1786  
1787  	pci_read_config_word(dev, pos + PCI_X_CMD,
1788  			     (u16 *)save_state->cap.data);
1789  
1790  	return 0;
1791  }
1792  
pci_restore_pcix_state(struct pci_dev * dev)1793  static void pci_restore_pcix_state(struct pci_dev *dev)
1794  {
1795  	int i = 0, pos;
1796  	struct pci_cap_saved_state *save_state;
1797  	u16 *cap;
1798  
1799  	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1800  	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1801  	if (!save_state || !pos)
1802  		return;
1803  	cap = (u16 *)&save_state->cap.data[0];
1804  
1805  	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1806  }
1807  
1808  /**
1809   * pci_save_state - save the PCI configuration space of a device before
1810   *		    suspending
1811   * @dev: PCI device that we're dealing with
1812   */
pci_save_state(struct pci_dev * dev)1813  int pci_save_state(struct pci_dev *dev)
1814  {
1815  	int i;
1816  	/* XXX: 100% dword access ok here? */
1817  	for (i = 0; i < 16; i++) {
1818  		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1819  		pci_dbg(dev, "save config %#04x: %#010x\n",
1820  			i * 4, dev->saved_config_space[i]);
1821  	}
1822  	dev->state_saved = true;
1823  
1824  	i = pci_save_pcie_state(dev);
1825  	if (i != 0)
1826  		return i;
1827  
1828  	i = pci_save_pcix_state(dev);
1829  	if (i != 0)
1830  		return i;
1831  
1832  	pci_save_dpc_state(dev);
1833  	pci_save_aer_state(dev);
1834  	pci_save_ptm_state(dev);
1835  	return pci_save_vc_state(dev);
1836  }
1837  EXPORT_SYMBOL(pci_save_state);
1838  
pci_restore_config_dword(struct pci_dev * pdev,int offset,u32 saved_val,int retry,bool force)1839  static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1840  				     u32 saved_val, int retry, bool force)
1841  {
1842  	u32 val;
1843  
1844  	pci_read_config_dword(pdev, offset, &val);
1845  	if (!force && val == saved_val)
1846  		return;
1847  
1848  	for (;;) {
1849  		pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n",
1850  			offset, val, saved_val);
1851  		pci_write_config_dword(pdev, offset, saved_val);
1852  		if (retry-- <= 0)
1853  			return;
1854  
1855  		pci_read_config_dword(pdev, offset, &val);
1856  		if (val == saved_val)
1857  			return;
1858  
1859  		mdelay(1);
1860  	}
1861  }
1862  
pci_restore_config_space_range(struct pci_dev * pdev,int start,int end,int retry,bool force)1863  static void pci_restore_config_space_range(struct pci_dev *pdev,
1864  					   int start, int end, int retry,
1865  					   bool force)
1866  {
1867  	int index;
1868  
1869  	for (index = end; index >= start; index--)
1870  		pci_restore_config_dword(pdev, 4 * index,
1871  					 pdev->saved_config_space[index],
1872  					 retry, force);
1873  }
1874  
pci_restore_config_space(struct pci_dev * pdev)1875  static void pci_restore_config_space(struct pci_dev *pdev)
1876  {
1877  	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1878  		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1879  		/* Restore BARs before the command register. */
1880  		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1881  		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1882  	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1883  		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1884  
1885  		/*
1886  		 * Force rewriting of prefetch registers to avoid S3 resume
1887  		 * issues on Intel PCI bridges that occur when these
1888  		 * registers are not explicitly written.
1889  		 */
1890  		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1891  		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1892  	} else {
1893  		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1894  	}
1895  }
1896  
pci_restore_rebar_state(struct pci_dev * pdev)1897  static void pci_restore_rebar_state(struct pci_dev *pdev)
1898  {
1899  	unsigned int pos, nbars, i;
1900  	u32 ctrl;
1901  
1902  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1903  	if (!pos)
1904  		return;
1905  
1906  	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1907  	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
1908  
1909  	for (i = 0; i < nbars; i++, pos += 8) {
1910  		struct resource *res;
1911  		int bar_idx, size;
1912  
1913  		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1914  		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1915  		res = pdev->resource + bar_idx;
1916  		size = pci_rebar_bytes_to_size(resource_size(res));
1917  		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1918  		ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
1919  		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1920  	}
1921  }
1922  
1923  /**
1924   * pci_restore_state - Restore the saved state of a PCI device
1925   * @dev: PCI device that we're dealing with
1926   */
pci_restore_state(struct pci_dev * dev)1927  void pci_restore_state(struct pci_dev *dev)
1928  {
1929  	if (!dev->state_saved)
1930  		return;
1931  
1932  	pci_restore_pcie_state(dev);
1933  	pci_restore_pasid_state(dev);
1934  	pci_restore_pri_state(dev);
1935  	pci_restore_ats_state(dev);
1936  	pci_restore_vc_state(dev);
1937  	pci_restore_rebar_state(dev);
1938  	pci_restore_dpc_state(dev);
1939  	pci_restore_ptm_state(dev);
1940  
1941  	pci_aer_clear_status(dev);
1942  	pci_restore_aer_state(dev);
1943  
1944  	pci_restore_config_space(dev);
1945  
1946  	pci_restore_pcix_state(dev);
1947  	pci_restore_msi_state(dev);
1948  
1949  	/* Restore ACS and IOV configuration state */
1950  	pci_enable_acs(dev);
1951  	pci_restore_iov_state(dev);
1952  
1953  	dev->state_saved = false;
1954  }
1955  EXPORT_SYMBOL(pci_restore_state);
1956  
1957  struct pci_saved_state {
1958  	u32 config_space[16];
1959  	struct pci_cap_saved_data cap[];
1960  };
1961  
1962  /**
1963   * pci_store_saved_state - Allocate and return an opaque struct containing
1964   *			   the device saved state.
1965   * @dev: PCI device that we're dealing with
1966   *
1967   * Return NULL if no state or error.
1968   */
pci_store_saved_state(struct pci_dev * dev)1969  struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1970  {
1971  	struct pci_saved_state *state;
1972  	struct pci_cap_saved_state *tmp;
1973  	struct pci_cap_saved_data *cap;
1974  	size_t size;
1975  
1976  	if (!dev->state_saved)
1977  		return NULL;
1978  
1979  	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1980  
1981  	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1982  		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1983  
1984  	state = kzalloc(size, GFP_KERNEL);
1985  	if (!state)
1986  		return NULL;
1987  
1988  	memcpy(state->config_space, dev->saved_config_space,
1989  	       sizeof(state->config_space));
1990  
1991  	cap = state->cap;
1992  	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1993  		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1994  		memcpy(cap, &tmp->cap, len);
1995  		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1996  	}
1997  	/* Empty cap_save terminates list */
1998  
1999  	return state;
2000  }
2001  EXPORT_SYMBOL_GPL(pci_store_saved_state);
2002  
2003  /**
2004   * pci_load_saved_state - Reload the provided save state into struct pci_dev.
2005   * @dev: PCI device that we're dealing with
2006   * @state: Saved state returned from pci_store_saved_state()
2007   */
pci_load_saved_state(struct pci_dev * dev,struct pci_saved_state * state)2008  int pci_load_saved_state(struct pci_dev *dev,
2009  			 struct pci_saved_state *state)
2010  {
2011  	struct pci_cap_saved_data *cap;
2012  
2013  	dev->state_saved = false;
2014  
2015  	if (!state)
2016  		return 0;
2017  
2018  	memcpy(dev->saved_config_space, state->config_space,
2019  	       sizeof(state->config_space));
2020  
2021  	cap = state->cap;
2022  	while (cap->size) {
2023  		struct pci_cap_saved_state *tmp;
2024  
2025  		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
2026  		if (!tmp || tmp->cap.size != cap->size)
2027  			return -EINVAL;
2028  
2029  		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
2030  		cap = (struct pci_cap_saved_data *)((u8 *)cap +
2031  		       sizeof(struct pci_cap_saved_data) + cap->size);
2032  	}
2033  
2034  	dev->state_saved = true;
2035  	return 0;
2036  }
2037  EXPORT_SYMBOL_GPL(pci_load_saved_state);
2038  
2039  /**
2040   * pci_load_and_free_saved_state - Reload the save state pointed to by state,
2041   *				   and free the memory allocated for it.
2042   * @dev: PCI device that we're dealing with
2043   * @state: Pointer to saved state returned from pci_store_saved_state()
2044   */
pci_load_and_free_saved_state(struct pci_dev * dev,struct pci_saved_state ** state)2045  int pci_load_and_free_saved_state(struct pci_dev *dev,
2046  				  struct pci_saved_state **state)
2047  {
2048  	int ret = pci_load_saved_state(dev, *state);
2049  	kfree(*state);
2050  	*state = NULL;
2051  	return ret;
2052  }
2053  EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
2054  
pcibios_enable_device(struct pci_dev * dev,int bars)2055  int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
2056  {
2057  	return pci_enable_resources(dev, bars);
2058  }
2059  
do_pci_enable_device(struct pci_dev * dev,int bars)2060  static int do_pci_enable_device(struct pci_dev *dev, int bars)
2061  {
2062  	int err;
2063  	struct pci_dev *bridge;
2064  	u16 cmd;
2065  	u8 pin;
2066  
2067  	err = pci_set_power_state(dev, PCI_D0);
2068  	if (err < 0 && err != -EIO)
2069  		return err;
2070  
2071  	bridge = pci_upstream_bridge(dev);
2072  	if (bridge)
2073  		pcie_aspm_powersave_config_link(bridge);
2074  
2075  	err = pcibios_enable_device(dev, bars);
2076  	if (err < 0)
2077  		return err;
2078  	pci_fixup_device(pci_fixup_enable, dev);
2079  
2080  	if (dev->msi_enabled || dev->msix_enabled)
2081  		return 0;
2082  
2083  	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2084  	if (pin) {
2085  		pci_read_config_word(dev, PCI_COMMAND, &cmd);
2086  		if (cmd & PCI_COMMAND_INTX_DISABLE)
2087  			pci_write_config_word(dev, PCI_COMMAND,
2088  					      cmd & ~PCI_COMMAND_INTX_DISABLE);
2089  	}
2090  
2091  	return 0;
2092  }
2093  
2094  /**
2095   * pci_reenable_device - Resume abandoned device
2096   * @dev: PCI device to be resumed
2097   *
2098   * NOTE: This function is a backend of pci_default_resume() and is not supposed
2099   * to be called by normal code, write proper resume handler and use it instead.
2100   */
pci_reenable_device(struct pci_dev * dev)2101  int pci_reenable_device(struct pci_dev *dev)
2102  {
2103  	if (pci_is_enabled(dev))
2104  		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
2105  	return 0;
2106  }
2107  EXPORT_SYMBOL(pci_reenable_device);
2108  
pci_enable_bridge(struct pci_dev * dev)2109  static void pci_enable_bridge(struct pci_dev *dev)
2110  {
2111  	struct pci_dev *bridge;
2112  	int retval;
2113  
2114  	bridge = pci_upstream_bridge(dev);
2115  	if (bridge)
2116  		pci_enable_bridge(bridge);
2117  
2118  	if (pci_is_enabled(dev)) {
2119  		if (!dev->is_busmaster)
2120  			pci_set_master(dev);
2121  		return;
2122  	}
2123  
2124  	retval = pci_enable_device(dev);
2125  	if (retval)
2126  		pci_err(dev, "Error enabling bridge (%d), continuing\n",
2127  			retval);
2128  	pci_set_master(dev);
2129  }
2130  
pci_enable_device_flags(struct pci_dev * dev,unsigned long flags)2131  static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
2132  {
2133  	struct pci_dev *bridge;
2134  	int err;
2135  	int i, bars = 0;
2136  
2137  	/*
2138  	 * Power state could be unknown at this point, either due to a fresh
2139  	 * boot or a device removal call.  So get the current power state
2140  	 * so that things like MSI message writing will behave as expected
2141  	 * (e.g. if the device really is in D0 at enable time).
2142  	 */
2143  	pci_update_current_state(dev, dev->current_state);
2144  
2145  	if (atomic_inc_return(&dev->enable_cnt) > 1)
2146  		return 0;		/* already enabled */
2147  
2148  	bridge = pci_upstream_bridge(dev);
2149  	if (bridge)
2150  		pci_enable_bridge(bridge);
2151  
2152  	/* only skip sriov related */
2153  	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
2154  		if (dev->resource[i].flags & flags)
2155  			bars |= (1 << i);
2156  	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2157  		if (dev->resource[i].flags & flags)
2158  			bars |= (1 << i);
2159  
2160  	err = do_pci_enable_device(dev, bars);
2161  	if (err < 0)
2162  		atomic_dec(&dev->enable_cnt);
2163  	return err;
2164  }
2165  
2166  /**
2167   * pci_enable_device_mem - Initialize a device for use with Memory space
2168   * @dev: PCI device to be initialized
2169   *
2170   * Initialize device before it's used by a driver. Ask low-level code
2171   * to enable Memory resources. Wake up the device if it was suspended.
2172   * Beware, this function can fail.
2173   */
pci_enable_device_mem(struct pci_dev * dev)2174  int pci_enable_device_mem(struct pci_dev *dev)
2175  {
2176  	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2177  }
2178  EXPORT_SYMBOL(pci_enable_device_mem);
2179  
2180  /**
2181   * pci_enable_device - Initialize device before it's used by a driver.
2182   * @dev: PCI device to be initialized
2183   *
2184   * Initialize device before it's used by a driver. Ask low-level code
2185   * to enable I/O and memory. Wake up the device if it was suspended.
2186   * Beware, this function can fail.
2187   *
2188   * Note we don't actually enable the device many times if we call
2189   * this function repeatedly (we just increment the count).
2190   */
pci_enable_device(struct pci_dev * dev)2191  int pci_enable_device(struct pci_dev *dev)
2192  {
2193  	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2194  }
2195  EXPORT_SYMBOL(pci_enable_device);
2196  
2197  /*
2198   * pcibios_device_add - provide arch specific hooks when adding device dev
2199   * @dev: the PCI device being added
2200   *
2201   * Permits the platform to provide architecture specific functionality when
2202   * devices are added. This is the default implementation. Architecture
2203   * implementations can override this.
2204   */
pcibios_device_add(struct pci_dev * dev)2205  int __weak pcibios_device_add(struct pci_dev *dev)
2206  {
2207  	return 0;
2208  }
2209  
2210  /**
2211   * pcibios_release_device - provide arch specific hooks when releasing
2212   *			    device dev
2213   * @dev: the PCI device being released
2214   *
2215   * Permits the platform to provide architecture specific functionality when
2216   * devices are released. This is the default implementation. Architecture
2217   * implementations can override this.
2218   */
pcibios_release_device(struct pci_dev * dev)2219  void __weak pcibios_release_device(struct pci_dev *dev) {}
2220  
2221  /**
2222   * pcibios_disable_device - disable arch specific PCI resources for device dev
2223   * @dev: the PCI device to disable
2224   *
2225   * Disables architecture specific PCI resources for the device. This
2226   * is the default implementation. Architecture implementations can
2227   * override this.
2228   */
pcibios_disable_device(struct pci_dev * dev)2229  void __weak pcibios_disable_device(struct pci_dev *dev) {}
2230  
do_pci_disable_device(struct pci_dev * dev)2231  static void do_pci_disable_device(struct pci_dev *dev)
2232  {
2233  	u16 pci_command;
2234  
2235  	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2236  	if (pci_command & PCI_COMMAND_MASTER) {
2237  		pci_command &= ~PCI_COMMAND_MASTER;
2238  		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2239  	}
2240  
2241  	pcibios_disable_device(dev);
2242  }
2243  
2244  /**
2245   * pci_disable_enabled_device - Disable device without updating enable_cnt
2246   * @dev: PCI device to disable
2247   *
2248   * NOTE: This function is a backend of PCI power management routines and is
2249   * not supposed to be called drivers.
2250   */
pci_disable_enabled_device(struct pci_dev * dev)2251  void pci_disable_enabled_device(struct pci_dev *dev)
2252  {
2253  	if (pci_is_enabled(dev))
2254  		do_pci_disable_device(dev);
2255  }
2256  
2257  /**
2258   * pci_disable_device - Disable PCI device after use
2259   * @dev: PCI device to be disabled
2260   *
2261   * Signal to the system that the PCI device is not in use by the system
2262   * anymore.  This only involves disabling PCI bus-mastering, if active.
2263   *
2264   * Note we don't actually disable the device until all callers of
2265   * pci_enable_device() have called pci_disable_device().
2266   */
pci_disable_device(struct pci_dev * dev)2267  void pci_disable_device(struct pci_dev *dev)
2268  {
2269  	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2270  		      "disabling already-disabled device");
2271  
2272  	if (atomic_dec_return(&dev->enable_cnt) != 0)
2273  		return;
2274  
2275  	do_pci_disable_device(dev);
2276  
2277  	dev->is_busmaster = 0;
2278  }
2279  EXPORT_SYMBOL(pci_disable_device);
2280  
2281  /**
2282   * pcibios_set_pcie_reset_state - set reset state for device dev
2283   * @dev: the PCIe device reset
2284   * @state: Reset state to enter into
2285   *
2286   * Set the PCIe reset state for the device. This is the default
2287   * implementation. Architecture implementations can override this.
2288   */
pcibios_set_pcie_reset_state(struct pci_dev * dev,enum pcie_reset_state state)2289  int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2290  					enum pcie_reset_state state)
2291  {
2292  	return -EINVAL;
2293  }
2294  
2295  /**
2296   * pci_set_pcie_reset_state - set reset state for device dev
2297   * @dev: the PCIe device reset
2298   * @state: Reset state to enter into
2299   *
2300   * Sets the PCI reset state for the device.
2301   */
pci_set_pcie_reset_state(struct pci_dev * dev,enum pcie_reset_state state)2302  int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2303  {
2304  	return pcibios_set_pcie_reset_state(dev, state);
2305  }
2306  EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2307  
2308  #ifdef CONFIG_PCIEAER
pcie_clear_device_status(struct pci_dev * dev)2309  void pcie_clear_device_status(struct pci_dev *dev)
2310  {
2311  	u16 sta;
2312  
2313  	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2314  	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2315  }
2316  #endif
2317  
2318  /**
2319   * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2320   * @dev: PCIe root port or event collector.
2321   */
pcie_clear_root_pme_status(struct pci_dev * dev)2322  void pcie_clear_root_pme_status(struct pci_dev *dev)
2323  {
2324  	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2325  }
2326  
2327  /**
2328   * pci_check_pme_status - Check if given device has generated PME.
2329   * @dev: Device to check.
2330   *
2331   * Check the PME status of the device and if set, clear it and clear PME enable
2332   * (if set).  Return 'true' if PME status and PME enable were both set or
2333   * 'false' otherwise.
2334   */
pci_check_pme_status(struct pci_dev * dev)2335  bool pci_check_pme_status(struct pci_dev *dev)
2336  {
2337  	int pmcsr_pos;
2338  	u16 pmcsr;
2339  	bool ret = false;
2340  
2341  	if (!dev->pm_cap)
2342  		return false;
2343  
2344  	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2345  	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2346  	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2347  		return false;
2348  
2349  	/* Clear PME status. */
2350  	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2351  	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2352  		/* Disable PME to avoid interrupt flood. */
2353  		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2354  		ret = true;
2355  	}
2356  
2357  	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2358  
2359  	return ret;
2360  }
2361  
2362  /**
2363   * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2364   * @dev: Device to handle.
2365   * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2366   *
2367   * Check if @dev has generated PME and queue a resume request for it in that
2368   * case.
2369   */
pci_pme_wakeup(struct pci_dev * dev,void * pme_poll_reset)2370  static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2371  {
2372  	if (pme_poll_reset && dev->pme_poll)
2373  		dev->pme_poll = false;
2374  
2375  	if (pci_check_pme_status(dev)) {
2376  		pci_wakeup_event(dev);
2377  		pm_request_resume(&dev->dev);
2378  	}
2379  	return 0;
2380  }
2381  
2382  /**
2383   * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2384   * @bus: Top bus of the subtree to walk.
2385   */
pci_pme_wakeup_bus(struct pci_bus * bus)2386  void pci_pme_wakeup_bus(struct pci_bus *bus)
2387  {
2388  	if (bus)
2389  		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2390  }
2391  
2392  
2393  /**
2394   * pci_pme_capable - check the capability of PCI device to generate PME#
2395   * @dev: PCI device to handle.
2396   * @state: PCI state from which device will issue PME#.
2397   */
pci_pme_capable(struct pci_dev * dev,pci_power_t state)2398  bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2399  {
2400  	if (!dev->pm_cap)
2401  		return false;
2402  
2403  	return !!(dev->pme_support & (1 << state));
2404  }
2405  EXPORT_SYMBOL(pci_pme_capable);
2406  
pci_pme_list_scan(struct work_struct * work)2407  static void pci_pme_list_scan(struct work_struct *work)
2408  {
2409  	struct pci_pme_device *pme_dev, *n;
2410  
2411  	mutex_lock(&pci_pme_list_mutex);
2412  	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2413  		struct pci_dev *pdev = pme_dev->dev;
2414  
2415  		if (pdev->pme_poll) {
2416  			struct pci_dev *bridge = pdev->bus->self;
2417  			struct device *dev = &pdev->dev;
2418  			struct device *bdev = bridge ? &bridge->dev : NULL;
2419  			int bref = 0;
2420  
2421  			/*
2422  			 * If we have a bridge, it should be in an active/D0
2423  			 * state or the configuration space of subordinate
2424  			 * devices may not be accessible or stable over the
2425  			 * course of the call.
2426  			 */
2427  			if (bdev) {
2428  				bref = pm_runtime_get_if_active(bdev);
2429  				if (!bref)
2430  					continue;
2431  
2432  				if (bridge->current_state != PCI_D0)
2433  					goto put_bridge;
2434  			}
2435  
2436  			/*
2437  			 * The device itself should be suspended but config
2438  			 * space must be accessible, therefore it cannot be in
2439  			 * D3cold.
2440  			 */
2441  			if (pm_runtime_suspended(dev) &&
2442  			    pdev->current_state != PCI_D3cold)
2443  				pci_pme_wakeup(pdev, NULL);
2444  
2445  put_bridge:
2446  			if (bref > 0)
2447  				pm_runtime_put(bdev);
2448  		} else {
2449  			list_del(&pme_dev->list);
2450  			kfree(pme_dev);
2451  		}
2452  	}
2453  	if (!list_empty(&pci_pme_list))
2454  		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2455  				   msecs_to_jiffies(PME_TIMEOUT));
2456  	mutex_unlock(&pci_pme_list_mutex);
2457  }
2458  
__pci_pme_active(struct pci_dev * dev,bool enable)2459  static void __pci_pme_active(struct pci_dev *dev, bool enable)
2460  {
2461  	u16 pmcsr;
2462  
2463  	if (!dev->pme_support)
2464  		return;
2465  
2466  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2467  	/* Clear PME_Status by writing 1 to it and enable PME# */
2468  	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2469  	if (!enable)
2470  		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2471  
2472  	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2473  }
2474  
2475  /**
2476   * pci_pme_restore - Restore PME configuration after config space restore.
2477   * @dev: PCI device to update.
2478   */
pci_pme_restore(struct pci_dev * dev)2479  void pci_pme_restore(struct pci_dev *dev)
2480  {
2481  	u16 pmcsr;
2482  
2483  	if (!dev->pme_support)
2484  		return;
2485  
2486  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2487  	if (dev->wakeup_prepared) {
2488  		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2489  		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2490  	} else {
2491  		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2492  		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2493  	}
2494  	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2495  }
2496  
2497  /**
2498   * pci_pme_active - enable or disable PCI device's PME# function
2499   * @dev: PCI device to handle.
2500   * @enable: 'true' to enable PME# generation; 'false' to disable it.
2501   *
2502   * The caller must verify that the device is capable of generating PME# before
2503   * calling this function with @enable equal to 'true'.
2504   */
pci_pme_active(struct pci_dev * dev,bool enable)2505  void pci_pme_active(struct pci_dev *dev, bool enable)
2506  {
2507  	__pci_pme_active(dev, enable);
2508  
2509  	/*
2510  	 * PCI (as opposed to PCIe) PME requires that the device have
2511  	 * its PME# line hooked up correctly. Not all hardware vendors
2512  	 * do this, so the PME never gets delivered and the device
2513  	 * remains asleep. The easiest way around this is to
2514  	 * periodically walk the list of suspended devices and check
2515  	 * whether any have their PME flag set. The assumption is that
2516  	 * we'll wake up often enough anyway that this won't be a huge
2517  	 * hit, and the power savings from the devices will still be a
2518  	 * win.
2519  	 *
2520  	 * Although PCIe uses in-band PME message instead of PME# line
2521  	 * to report PME, PME does not work for some PCIe devices in
2522  	 * reality.  For example, there are devices that set their PME
2523  	 * status bits, but don't really bother to send a PME message;
2524  	 * there are PCI Express Root Ports that don't bother to
2525  	 * trigger interrupts when they receive PME messages from the
2526  	 * devices below.  So PME poll is used for PCIe devices too.
2527  	 */
2528  
2529  	if (dev->pme_poll) {
2530  		struct pci_pme_device *pme_dev;
2531  		if (enable) {
2532  			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2533  					  GFP_KERNEL);
2534  			if (!pme_dev) {
2535  				pci_warn(dev, "can't enable PME#\n");
2536  				return;
2537  			}
2538  			pme_dev->dev = dev;
2539  			mutex_lock(&pci_pme_list_mutex);
2540  			list_add(&pme_dev->list, &pci_pme_list);
2541  			if (list_is_singular(&pci_pme_list))
2542  				queue_delayed_work(system_freezable_wq,
2543  						   &pci_pme_work,
2544  						   msecs_to_jiffies(PME_TIMEOUT));
2545  			mutex_unlock(&pci_pme_list_mutex);
2546  		} else {
2547  			mutex_lock(&pci_pme_list_mutex);
2548  			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2549  				if (pme_dev->dev == dev) {
2550  					list_del(&pme_dev->list);
2551  					kfree(pme_dev);
2552  					break;
2553  				}
2554  			}
2555  			mutex_unlock(&pci_pme_list_mutex);
2556  		}
2557  	}
2558  
2559  	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2560  }
2561  EXPORT_SYMBOL(pci_pme_active);
2562  
2563  /**
2564   * __pci_enable_wake - enable PCI device as wakeup event source
2565   * @dev: PCI device affected
2566   * @state: PCI state from which device will issue wakeup events
2567   * @enable: True to enable event generation; false to disable
2568   *
2569   * This enables the device as a wakeup event source, or disables it.
2570   * When such events involves platform-specific hooks, those hooks are
2571   * called automatically by this routine.
2572   *
2573   * Devices with legacy power management (no standard PCI PM capabilities)
2574   * always require such platform hooks.
2575   *
2576   * RETURN VALUE:
2577   * 0 is returned on success
2578   * -EINVAL is returned if device is not supposed to wake up the system
2579   * Error code depending on the platform is returned if both the platform and
2580   * the native mechanism fail to enable the generation of wake-up events
2581   */
__pci_enable_wake(struct pci_dev * dev,pci_power_t state,bool enable)2582  static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2583  {
2584  	int ret = 0;
2585  
2586  	/*
2587  	 * Bridges that are not power-manageable directly only signal
2588  	 * wakeup on behalf of subordinate devices which is set up
2589  	 * elsewhere, so skip them. However, bridges that are
2590  	 * power-manageable may signal wakeup for themselves (for example,
2591  	 * on a hotplug event) and they need to be covered here.
2592  	 */
2593  	if (!pci_power_manageable(dev))
2594  		return 0;
2595  
2596  	/* Don't do the same thing twice in a row for one device. */
2597  	if (!!enable == !!dev->wakeup_prepared)
2598  		return 0;
2599  
2600  	/*
2601  	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2602  	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2603  	 * enable.  To disable wake-up we call the platform first, for symmetry.
2604  	 */
2605  
2606  	if (enable) {
2607  		int error;
2608  
2609  		/*
2610  		 * Enable PME signaling if the device can signal PME from
2611  		 * D3cold regardless of whether or not it can signal PME from
2612  		 * the current target state, because that will allow it to
2613  		 * signal PME when the hierarchy above it goes into D3cold and
2614  		 * the device itself ends up in D3cold as a result of that.
2615  		 */
2616  		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2617  			pci_pme_active(dev, true);
2618  		else
2619  			ret = 1;
2620  		error = platform_pci_set_wakeup(dev, true);
2621  		if (ret)
2622  			ret = error;
2623  		if (!ret)
2624  			dev->wakeup_prepared = true;
2625  	} else {
2626  		platform_pci_set_wakeup(dev, false);
2627  		pci_pme_active(dev, false);
2628  		dev->wakeup_prepared = false;
2629  	}
2630  
2631  	return ret;
2632  }
2633  
2634  /**
2635   * pci_enable_wake - change wakeup settings for a PCI device
2636   * @pci_dev: Target device
2637   * @state: PCI state from which device will issue wakeup events
2638   * @enable: Whether or not to enable event generation
2639   *
2640   * If @enable is set, check device_may_wakeup() for the device before calling
2641   * __pci_enable_wake() for it.
2642   */
pci_enable_wake(struct pci_dev * pci_dev,pci_power_t state,bool enable)2643  int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2644  {
2645  	if (enable && !device_may_wakeup(&pci_dev->dev))
2646  		return -EINVAL;
2647  
2648  	return __pci_enable_wake(pci_dev, state, enable);
2649  }
2650  EXPORT_SYMBOL(pci_enable_wake);
2651  
2652  /**
2653   * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2654   * @dev: PCI device to prepare
2655   * @enable: True to enable wake-up event generation; false to disable
2656   *
2657   * Many drivers want the device to wake up the system from D3_hot or D3_cold
2658   * and this function allows them to set that up cleanly - pci_enable_wake()
2659   * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2660   * ordering constraints.
2661   *
2662   * This function only returns error code if the device is not allowed to wake
2663   * up the system from sleep or it is not capable of generating PME# from both
2664   * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2665   */
pci_wake_from_d3(struct pci_dev * dev,bool enable)2666  int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2667  {
2668  	return pci_pme_capable(dev, PCI_D3cold) ?
2669  			pci_enable_wake(dev, PCI_D3cold, enable) :
2670  			pci_enable_wake(dev, PCI_D3hot, enable);
2671  }
2672  EXPORT_SYMBOL(pci_wake_from_d3);
2673  
2674  /**
2675   * pci_target_state - find an appropriate low power state for a given PCI dev
2676   * @dev: PCI device
2677   * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2678   *
2679   * Use underlying platform code to find a supported low power state for @dev.
2680   * If the platform can't manage @dev, return the deepest state from which it
2681   * can generate wake events, based on any available PME info.
2682   */
pci_target_state(struct pci_dev * dev,bool wakeup)2683  static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2684  {
2685  	if (platform_pci_power_manageable(dev)) {
2686  		/*
2687  		 * Call the platform to find the target state for the device.
2688  		 */
2689  		pci_power_t state = platform_pci_choose_state(dev);
2690  
2691  		switch (state) {
2692  		case PCI_POWER_ERROR:
2693  		case PCI_UNKNOWN:
2694  			return PCI_D3hot;
2695  
2696  		case PCI_D1:
2697  		case PCI_D2:
2698  			if (pci_no_d1d2(dev))
2699  				return PCI_D3hot;
2700  		}
2701  
2702  		return state;
2703  	}
2704  
2705  	/*
2706  	 * If the device is in D3cold even though it's not power-manageable by
2707  	 * the platform, it may have been powered down by non-standard means.
2708  	 * Best to let it slumber.
2709  	 */
2710  	if (dev->current_state == PCI_D3cold)
2711  		return PCI_D3cold;
2712  	else if (!dev->pm_cap)
2713  		return PCI_D0;
2714  
2715  	if (wakeup && dev->pme_support) {
2716  		pci_power_t state = PCI_D3hot;
2717  
2718  		/*
2719  		 * Find the deepest state from which the device can generate
2720  		 * PME#.
2721  		 */
2722  		while (state && !(dev->pme_support & (1 << state)))
2723  			state--;
2724  
2725  		if (state)
2726  			return state;
2727  		else if (dev->pme_support & 1)
2728  			return PCI_D0;
2729  	}
2730  
2731  	return PCI_D3hot;
2732  }
2733  
2734  /**
2735   * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2736   *			  into a sleep state
2737   * @dev: Device to handle.
2738   *
2739   * Choose the power state appropriate for the device depending on whether
2740   * it can wake up the system and/or is power manageable by the platform
2741   * (PCI_D3hot is the default) and put the device into that state.
2742   */
pci_prepare_to_sleep(struct pci_dev * dev)2743  int pci_prepare_to_sleep(struct pci_dev *dev)
2744  {
2745  	bool wakeup = device_may_wakeup(&dev->dev);
2746  	pci_power_t target_state = pci_target_state(dev, wakeup);
2747  	int error;
2748  
2749  	if (target_state == PCI_POWER_ERROR)
2750  		return -EIO;
2751  
2752  	pci_enable_wake(dev, target_state, wakeup);
2753  
2754  	error = pci_set_power_state(dev, target_state);
2755  
2756  	if (error)
2757  		pci_enable_wake(dev, target_state, false);
2758  
2759  	return error;
2760  }
2761  EXPORT_SYMBOL(pci_prepare_to_sleep);
2762  
2763  /**
2764   * pci_back_from_sleep - turn PCI device on during system-wide transition
2765   *			 into working state
2766   * @dev: Device to handle.
2767   *
2768   * Disable device's system wake-up capability and put it into D0.
2769   */
pci_back_from_sleep(struct pci_dev * dev)2770  int pci_back_from_sleep(struct pci_dev *dev)
2771  {
2772  	int ret = pci_set_power_state(dev, PCI_D0);
2773  
2774  	if (ret)
2775  		return ret;
2776  
2777  	pci_enable_wake(dev, PCI_D0, false);
2778  	return 0;
2779  }
2780  EXPORT_SYMBOL(pci_back_from_sleep);
2781  
2782  /**
2783   * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2784   * @dev: PCI device being suspended.
2785   *
2786   * Prepare @dev to generate wake-up events at run time and put it into a low
2787   * power state.
2788   */
pci_finish_runtime_suspend(struct pci_dev * dev)2789  int pci_finish_runtime_suspend(struct pci_dev *dev)
2790  {
2791  	pci_power_t target_state;
2792  	int error;
2793  
2794  	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2795  	if (target_state == PCI_POWER_ERROR)
2796  		return -EIO;
2797  
2798  	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2799  
2800  	error = pci_set_power_state(dev, target_state);
2801  
2802  	if (error)
2803  		pci_enable_wake(dev, target_state, false);
2804  
2805  	return error;
2806  }
2807  
2808  /**
2809   * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2810   * @dev: Device to check.
2811   *
2812   * Return true if the device itself is capable of generating wake-up events
2813   * (through the platform or using the native PCIe PME) or if the device supports
2814   * PME and one of its upstream bridges can generate wake-up events.
2815   */
pci_dev_run_wake(struct pci_dev * dev)2816  bool pci_dev_run_wake(struct pci_dev *dev)
2817  {
2818  	struct pci_bus *bus = dev->bus;
2819  
2820  	if (!dev->pme_support)
2821  		return false;
2822  
2823  	/* PME-capable in principle, but not from the target power state */
2824  	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2825  		return false;
2826  
2827  	if (device_can_wakeup(&dev->dev))
2828  		return true;
2829  
2830  	while (bus->parent) {
2831  		struct pci_dev *bridge = bus->self;
2832  
2833  		if (device_can_wakeup(&bridge->dev))
2834  			return true;
2835  
2836  		bus = bus->parent;
2837  	}
2838  
2839  	/* We have reached the root bus. */
2840  	if (bus->bridge)
2841  		return device_can_wakeup(bus->bridge);
2842  
2843  	return false;
2844  }
2845  EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2846  
2847  /**
2848   * pci_dev_need_resume - Check if it is necessary to resume the device.
2849   * @pci_dev: Device to check.
2850   *
2851   * Return 'true' if the device is not runtime-suspended or it has to be
2852   * reconfigured due to wakeup settings difference between system and runtime
2853   * suspend, or the current power state of it is not suitable for the upcoming
2854   * (system-wide) transition.
2855   */
pci_dev_need_resume(struct pci_dev * pci_dev)2856  bool pci_dev_need_resume(struct pci_dev *pci_dev)
2857  {
2858  	struct device *dev = &pci_dev->dev;
2859  	pci_power_t target_state;
2860  
2861  	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2862  		return true;
2863  
2864  	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2865  
2866  	/*
2867  	 * If the earlier platform check has not triggered, D3cold is just power
2868  	 * removal on top of D3hot, so no need to resume the device in that
2869  	 * case.
2870  	 */
2871  	return target_state != pci_dev->current_state &&
2872  		target_state != PCI_D3cold &&
2873  		pci_dev->current_state != PCI_D3hot;
2874  }
2875  
2876  /**
2877   * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2878   * @pci_dev: Device to check.
2879   *
2880   * If the device is suspended and it is not configured for system wakeup,
2881   * disable PME for it to prevent it from waking up the system unnecessarily.
2882   *
2883   * Note that if the device's power state is D3cold and the platform check in
2884   * pci_dev_need_resume() has not triggered, the device's configuration need not
2885   * be changed.
2886   */
pci_dev_adjust_pme(struct pci_dev * pci_dev)2887  void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2888  {
2889  	struct device *dev = &pci_dev->dev;
2890  
2891  	spin_lock_irq(&dev->power.lock);
2892  
2893  	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2894  	    pci_dev->current_state < PCI_D3cold)
2895  		__pci_pme_active(pci_dev, false);
2896  
2897  	spin_unlock_irq(&dev->power.lock);
2898  }
2899  
2900  /**
2901   * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2902   * @pci_dev: Device to handle.
2903   *
2904   * If the device is runtime suspended and wakeup-capable, enable PME for it as
2905   * it might have been disabled during the prepare phase of system suspend if
2906   * the device was not configured for system wakeup.
2907   */
pci_dev_complete_resume(struct pci_dev * pci_dev)2908  void pci_dev_complete_resume(struct pci_dev *pci_dev)
2909  {
2910  	struct device *dev = &pci_dev->dev;
2911  
2912  	if (!pci_dev_run_wake(pci_dev))
2913  		return;
2914  
2915  	spin_lock_irq(&dev->power.lock);
2916  
2917  	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2918  		__pci_pme_active(pci_dev, true);
2919  
2920  	spin_unlock_irq(&dev->power.lock);
2921  }
2922  
2923  /**
2924   * pci_choose_state - Choose the power state of a PCI device.
2925   * @dev: Target PCI device.
2926   * @state: Target state for the whole system.
2927   *
2928   * Returns PCI power state suitable for @dev and @state.
2929   */
pci_choose_state(struct pci_dev * dev,pm_message_t state)2930  pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2931  {
2932  	if (state.event == PM_EVENT_ON)
2933  		return PCI_D0;
2934  
2935  	return pci_target_state(dev, false);
2936  }
2937  EXPORT_SYMBOL(pci_choose_state);
2938  
pci_config_pm_runtime_get(struct pci_dev * pdev)2939  void pci_config_pm_runtime_get(struct pci_dev *pdev)
2940  {
2941  	struct device *dev = &pdev->dev;
2942  	struct device *parent = dev->parent;
2943  
2944  	if (parent)
2945  		pm_runtime_get_sync(parent);
2946  	pm_runtime_get_noresume(dev);
2947  	/*
2948  	 * pdev->current_state is set to PCI_D3cold during suspending,
2949  	 * so wait until suspending completes
2950  	 */
2951  	pm_runtime_barrier(dev);
2952  	/*
2953  	 * Only need to resume devices in D3cold, because config
2954  	 * registers are still accessible for devices suspended but
2955  	 * not in D3cold.
2956  	 */
2957  	if (pdev->current_state == PCI_D3cold)
2958  		pm_runtime_resume(dev);
2959  }
2960  
pci_config_pm_runtime_put(struct pci_dev * pdev)2961  void pci_config_pm_runtime_put(struct pci_dev *pdev)
2962  {
2963  	struct device *dev = &pdev->dev;
2964  	struct device *parent = dev->parent;
2965  
2966  	pm_runtime_put(dev);
2967  	if (parent)
2968  		pm_runtime_put_sync(parent);
2969  }
2970  
2971  static const struct dmi_system_id bridge_d3_blacklist[] = {
2972  #ifdef CONFIG_X86
2973  	{
2974  		/*
2975  		 * Gigabyte X299 root port is not marked as hotplug capable
2976  		 * which allows Linux to power manage it.  However, this
2977  		 * confuses the BIOS SMI handler so don't power manage root
2978  		 * ports on that system.
2979  		 */
2980  		.ident = "X299 DESIGNARE EX-CF",
2981  		.matches = {
2982  			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2983  			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2984  		},
2985  	},
2986  	{
2987  		/*
2988  		 * Downstream device is not accessible after putting a root port
2989  		 * into D3cold and back into D0 on Elo Continental Z2 board
2990  		 */
2991  		.ident = "Elo Continental Z2",
2992  		.matches = {
2993  			DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"),
2994  			DMI_MATCH(DMI_BOARD_NAME, "Geminilake"),
2995  			DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
2996  		},
2997  	},
2998  	{
2999  		/*
3000  		 * Changing power state of root port dGPU is connected fails
3001  		 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
3002  		 */
3003  		.ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
3004  		.matches = {
3005  			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
3006  			DMI_MATCH(DMI_BOARD_NAME, "1972"),
3007  			DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
3008  		},
3009  	},
3010  #endif
3011  	{ }
3012  };
3013  
3014  /**
3015   * pci_bridge_d3_possible - Is it possible to put the bridge into D3
3016   * @bridge: Bridge to check
3017   *
3018   * This function checks if it is possible to move the bridge to D3.
3019   * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
3020   */
pci_bridge_d3_possible(struct pci_dev * bridge)3021  bool pci_bridge_d3_possible(struct pci_dev *bridge)
3022  {
3023  	if (!pci_is_pcie(bridge))
3024  		return false;
3025  
3026  	switch (pci_pcie_type(bridge)) {
3027  	case PCI_EXP_TYPE_ROOT_PORT:
3028  	case PCI_EXP_TYPE_UPSTREAM:
3029  	case PCI_EXP_TYPE_DOWNSTREAM:
3030  		if (pci_bridge_d3_disable)
3031  			return false;
3032  
3033  		/*
3034  		 * Hotplug ports handled by firmware in System Management Mode
3035  		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
3036  		 */
3037  		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
3038  			return false;
3039  
3040  		if (pci_bridge_d3_force)
3041  			return true;
3042  
3043  		/* Even the oldest 2010 Thunderbolt controller supports D3. */
3044  		if (bridge->is_thunderbolt)
3045  			return true;
3046  
3047  		/* Platform might know better if the bridge supports D3 */
3048  		if (platform_pci_bridge_d3(bridge))
3049  			return true;
3050  
3051  		/*
3052  		 * Hotplug ports handled natively by the OS were not validated
3053  		 * by vendors for runtime D3 at least until 2018 because there
3054  		 * was no OS support.
3055  		 */
3056  		if (bridge->is_hotplug_bridge)
3057  			return false;
3058  
3059  		if (dmi_check_system(bridge_d3_blacklist))
3060  			return false;
3061  
3062  		/*
3063  		 * It should be safe to put PCIe ports from 2015 or newer
3064  		 * to D3.
3065  		 */
3066  		if (dmi_get_bios_year() >= 2015)
3067  			return true;
3068  		break;
3069  	}
3070  
3071  	return false;
3072  }
3073  
pci_dev_check_d3cold(struct pci_dev * dev,void * data)3074  static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3075  {
3076  	bool *d3cold_ok = data;
3077  
3078  	if (/* The device needs to be allowed to go D3cold ... */
3079  	    dev->no_d3cold || !dev->d3cold_allowed ||
3080  
3081  	    /* ... and if it is wakeup capable to do so from D3cold. */
3082  	    (device_may_wakeup(&dev->dev) &&
3083  	     !pci_pme_capable(dev, PCI_D3cold)) ||
3084  
3085  	    /* If it is a bridge it must be allowed to go to D3. */
3086  	    !pci_power_manageable(dev))
3087  
3088  		*d3cold_ok = false;
3089  
3090  	return !*d3cold_ok;
3091  }
3092  
3093  /*
3094   * pci_bridge_d3_update - Update bridge D3 capabilities
3095   * @dev: PCI device which is changed
3096   *
3097   * Update upstream bridge PM capabilities accordingly depending on if the
3098   * device PM configuration was changed or the device is being removed.  The
3099   * change is also propagated upstream.
3100   */
pci_bridge_d3_update(struct pci_dev * dev)3101  void pci_bridge_d3_update(struct pci_dev *dev)
3102  {
3103  	bool remove = !device_is_registered(&dev->dev);
3104  	struct pci_dev *bridge;
3105  	bool d3cold_ok = true;
3106  
3107  	bridge = pci_upstream_bridge(dev);
3108  	if (!bridge || !pci_bridge_d3_possible(bridge))
3109  		return;
3110  
3111  	/*
3112  	 * If D3 is currently allowed for the bridge, removing one of its
3113  	 * children won't change that.
3114  	 */
3115  	if (remove && bridge->bridge_d3)
3116  		return;
3117  
3118  	/*
3119  	 * If D3 is currently allowed for the bridge and a child is added or
3120  	 * changed, disallowance of D3 can only be caused by that child, so
3121  	 * we only need to check that single device, not any of its siblings.
3122  	 *
3123  	 * If D3 is currently not allowed for the bridge, checking the device
3124  	 * first may allow us to skip checking its siblings.
3125  	 */
3126  	if (!remove)
3127  		pci_dev_check_d3cold(dev, &d3cold_ok);
3128  
3129  	/*
3130  	 * If D3 is currently not allowed for the bridge, this may be caused
3131  	 * either by the device being changed/removed or any of its siblings,
3132  	 * so we need to go through all children to find out if one of them
3133  	 * continues to block D3.
3134  	 */
3135  	if (d3cold_ok && !bridge->bridge_d3)
3136  		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3137  			     &d3cold_ok);
3138  
3139  	if (bridge->bridge_d3 != d3cold_ok) {
3140  		bridge->bridge_d3 = d3cold_ok;
3141  		/* Propagate change to upstream bridges */
3142  		pci_bridge_d3_update(bridge);
3143  	}
3144  }
3145  
3146  /**
3147   * pci_d3cold_enable - Enable D3cold for device
3148   * @dev: PCI device to handle
3149   *
3150   * This function can be used in drivers to enable D3cold from the device
3151   * they handle.  It also updates upstream PCI bridge PM capabilities
3152   * accordingly.
3153   */
pci_d3cold_enable(struct pci_dev * dev)3154  void pci_d3cold_enable(struct pci_dev *dev)
3155  {
3156  	if (dev->no_d3cold) {
3157  		dev->no_d3cold = false;
3158  		pci_bridge_d3_update(dev);
3159  	}
3160  }
3161  EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3162  
3163  /**
3164   * pci_d3cold_disable - Disable D3cold for device
3165   * @dev: PCI device to handle
3166   *
3167   * This function can be used in drivers to disable D3cold from the device
3168   * they handle.  It also updates upstream PCI bridge PM capabilities
3169   * accordingly.
3170   */
pci_d3cold_disable(struct pci_dev * dev)3171  void pci_d3cold_disable(struct pci_dev *dev)
3172  {
3173  	if (!dev->no_d3cold) {
3174  		dev->no_d3cold = true;
3175  		pci_bridge_d3_update(dev);
3176  	}
3177  }
3178  EXPORT_SYMBOL_GPL(pci_d3cold_disable);
3179  
3180  /**
3181   * pci_pm_init - Initialize PM functions of given PCI device
3182   * @dev: PCI device to handle.
3183   */
pci_pm_init(struct pci_dev * dev)3184  void pci_pm_init(struct pci_dev *dev)
3185  {
3186  	int pm;
3187  	u16 status;
3188  	u16 pmc;
3189  
3190  	pm_runtime_forbid(&dev->dev);
3191  	pm_runtime_set_active(&dev->dev);
3192  	pm_runtime_enable(&dev->dev);
3193  	device_enable_async_suspend(&dev->dev);
3194  	dev->wakeup_prepared = false;
3195  
3196  	dev->pm_cap = 0;
3197  	dev->pme_support = 0;
3198  
3199  	/* find PCI PM capability in list */
3200  	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3201  	if (!pm)
3202  		return;
3203  	/* Check device's ability to generate PME# */
3204  	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3205  
3206  	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3207  		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3208  			pmc & PCI_PM_CAP_VER_MASK);
3209  		return;
3210  	}
3211  
3212  	dev->pm_cap = pm;
3213  	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3214  	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3215  	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3216  	dev->d3cold_allowed = true;
3217  
3218  	dev->d1_support = false;
3219  	dev->d2_support = false;
3220  	if (!pci_no_d1d2(dev)) {
3221  		if (pmc & PCI_PM_CAP_D1)
3222  			dev->d1_support = true;
3223  		if (pmc & PCI_PM_CAP_D2)
3224  			dev->d2_support = true;
3225  
3226  		if (dev->d1_support || dev->d2_support)
3227  			pci_info(dev, "supports%s%s\n",
3228  				   dev->d1_support ? " D1" : "",
3229  				   dev->d2_support ? " D2" : "");
3230  	}
3231  
3232  	pmc &= PCI_PM_CAP_PME_MASK;
3233  	if (pmc) {
3234  		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3235  			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3236  			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3237  			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3238  			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3239  			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3240  		dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
3241  		dev->pme_poll = true;
3242  		/*
3243  		 * Make device's PM flags reflect the wake-up capability, but
3244  		 * let the user space enable it to wake up the system as needed.
3245  		 */
3246  		device_set_wakeup_capable(&dev->dev, true);
3247  		/* Disable the PME# generation functionality */
3248  		pci_pme_active(dev, false);
3249  	}
3250  
3251  	pci_read_config_word(dev, PCI_STATUS, &status);
3252  	if (status & PCI_STATUS_IMM_READY)
3253  		dev->imm_ready = 1;
3254  }
3255  
pci_ea_flags(struct pci_dev * dev,u8 prop)3256  static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3257  {
3258  	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3259  
3260  	switch (prop) {
3261  	case PCI_EA_P_MEM:
3262  	case PCI_EA_P_VF_MEM:
3263  		flags |= IORESOURCE_MEM;
3264  		break;
3265  	case PCI_EA_P_MEM_PREFETCH:
3266  	case PCI_EA_P_VF_MEM_PREFETCH:
3267  		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3268  		break;
3269  	case PCI_EA_P_IO:
3270  		flags |= IORESOURCE_IO;
3271  		break;
3272  	default:
3273  		return 0;
3274  	}
3275  
3276  	return flags;
3277  }
3278  
pci_ea_get_resource(struct pci_dev * dev,u8 bei,u8 prop)3279  static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3280  					    u8 prop)
3281  {
3282  	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3283  		return &dev->resource[bei];
3284  #ifdef CONFIG_PCI_IOV
3285  	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3286  		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3287  		return &dev->resource[PCI_IOV_RESOURCES +
3288  				      bei - PCI_EA_BEI_VF_BAR0];
3289  #endif
3290  	else if (bei == PCI_EA_BEI_ROM)
3291  		return &dev->resource[PCI_ROM_RESOURCE];
3292  	else
3293  		return NULL;
3294  }
3295  
3296  /* Read an Enhanced Allocation (EA) entry */
pci_ea_read(struct pci_dev * dev,int offset)3297  static int pci_ea_read(struct pci_dev *dev, int offset)
3298  {
3299  	struct resource *res;
3300  	const char *res_name;
3301  	int ent_size, ent_offset = offset;
3302  	resource_size_t start, end;
3303  	unsigned long flags;
3304  	u32 dw0, bei, base, max_offset;
3305  	u8 prop;
3306  	bool support_64 = (sizeof(resource_size_t) >= 8);
3307  
3308  	pci_read_config_dword(dev, ent_offset, &dw0);
3309  	ent_offset += 4;
3310  
3311  	/* Entry size field indicates DWORDs after 1st */
3312  	ent_size = (FIELD_GET(PCI_EA_ES, dw0) + 1) << 2;
3313  
3314  	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3315  		goto out;
3316  
3317  	bei = FIELD_GET(PCI_EA_BEI, dw0);
3318  	prop = FIELD_GET(PCI_EA_PP, dw0);
3319  
3320  	/*
3321  	 * If the Property is in the reserved range, try the Secondary
3322  	 * Property instead.
3323  	 */
3324  	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3325  		prop = FIELD_GET(PCI_EA_SP, dw0);
3326  	if (prop > PCI_EA_P_BRIDGE_IO)
3327  		goto out;
3328  
3329  	res = pci_ea_get_resource(dev, bei, prop);
3330  	res_name = pci_resource_name(dev, bei);
3331  	if (!res) {
3332  		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3333  		goto out;
3334  	}
3335  
3336  	flags = pci_ea_flags(dev, prop);
3337  	if (!flags) {
3338  		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3339  		goto out;
3340  	}
3341  
3342  	/* Read Base */
3343  	pci_read_config_dword(dev, ent_offset, &base);
3344  	start = (base & PCI_EA_FIELD_MASK);
3345  	ent_offset += 4;
3346  
3347  	/* Read MaxOffset */
3348  	pci_read_config_dword(dev, ent_offset, &max_offset);
3349  	ent_offset += 4;
3350  
3351  	/* Read Base MSBs (if 64-bit entry) */
3352  	if (base & PCI_EA_IS_64) {
3353  		u32 base_upper;
3354  
3355  		pci_read_config_dword(dev, ent_offset, &base_upper);
3356  		ent_offset += 4;
3357  
3358  		flags |= IORESOURCE_MEM_64;
3359  
3360  		/* entry starts above 32-bit boundary, can't use */
3361  		if (!support_64 && base_upper)
3362  			goto out;
3363  
3364  		if (support_64)
3365  			start |= ((u64)base_upper << 32);
3366  	}
3367  
3368  	end = start + (max_offset | 0x03);
3369  
3370  	/* Read MaxOffset MSBs (if 64-bit entry) */
3371  	if (max_offset & PCI_EA_IS_64) {
3372  		u32 max_offset_upper;
3373  
3374  		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3375  		ent_offset += 4;
3376  
3377  		flags |= IORESOURCE_MEM_64;
3378  
3379  		/* entry too big, can't use */
3380  		if (!support_64 && max_offset_upper)
3381  			goto out;
3382  
3383  		if (support_64)
3384  			end += ((u64)max_offset_upper << 32);
3385  	}
3386  
3387  	if (end < start) {
3388  		pci_err(dev, "EA Entry crosses address boundary\n");
3389  		goto out;
3390  	}
3391  
3392  	if (ent_size != ent_offset - offset) {
3393  		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3394  			ent_size, ent_offset - offset);
3395  		goto out;
3396  	}
3397  
3398  	res->name = pci_name(dev);
3399  	res->start = start;
3400  	res->end = end;
3401  	res->flags = flags;
3402  
3403  	if (bei <= PCI_EA_BEI_BAR5)
3404  		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3405  			 res_name, res, prop);
3406  	else if (bei == PCI_EA_BEI_ROM)
3407  		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3408  			 res_name, res, prop);
3409  	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3410  		pci_info(dev, "%s %pR: from Enhanced Allocation, properties %#02x\n",
3411  			 res_name, res, prop);
3412  	else
3413  		pci_info(dev, "BEI %d %pR: from Enhanced Allocation, properties %#02x\n",
3414  			   bei, res, prop);
3415  
3416  out:
3417  	return offset + ent_size;
3418  }
3419  
3420  /* Enhanced Allocation Initialization */
pci_ea_init(struct pci_dev * dev)3421  void pci_ea_init(struct pci_dev *dev)
3422  {
3423  	int ea;
3424  	u8 num_ent;
3425  	int offset;
3426  	int i;
3427  
3428  	/* find PCI EA capability in list */
3429  	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3430  	if (!ea)
3431  		return;
3432  
3433  	/* determine the number of entries */
3434  	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3435  					&num_ent);
3436  	num_ent &= PCI_EA_NUM_ENT_MASK;
3437  
3438  	offset = ea + PCI_EA_FIRST_ENT;
3439  
3440  	/* Skip DWORD 2 for type 1 functions */
3441  	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3442  		offset += 4;
3443  
3444  	/* parse each EA entry */
3445  	for (i = 0; i < num_ent; ++i)
3446  		offset = pci_ea_read(dev, offset);
3447  }
3448  
pci_add_saved_cap(struct pci_dev * pci_dev,struct pci_cap_saved_state * new_cap)3449  static void pci_add_saved_cap(struct pci_dev *pci_dev,
3450  	struct pci_cap_saved_state *new_cap)
3451  {
3452  	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3453  }
3454  
3455  /**
3456   * _pci_add_cap_save_buffer - allocate buffer for saving given
3457   *			      capability registers
3458   * @dev: the PCI device
3459   * @cap: the capability to allocate the buffer for
3460   * @extended: Standard or Extended capability ID
3461   * @size: requested size of the buffer
3462   */
_pci_add_cap_save_buffer(struct pci_dev * dev,u16 cap,bool extended,unsigned int size)3463  static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3464  				    bool extended, unsigned int size)
3465  {
3466  	int pos;
3467  	struct pci_cap_saved_state *save_state;
3468  
3469  	if (extended)
3470  		pos = pci_find_ext_capability(dev, cap);
3471  	else
3472  		pos = pci_find_capability(dev, cap);
3473  
3474  	if (!pos)
3475  		return 0;
3476  
3477  	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3478  	if (!save_state)
3479  		return -ENOMEM;
3480  
3481  	save_state->cap.cap_nr = cap;
3482  	save_state->cap.cap_extended = extended;
3483  	save_state->cap.size = size;
3484  	pci_add_saved_cap(dev, save_state);
3485  
3486  	return 0;
3487  }
3488  
pci_add_cap_save_buffer(struct pci_dev * dev,char cap,unsigned int size)3489  int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3490  {
3491  	return _pci_add_cap_save_buffer(dev, cap, false, size);
3492  }
3493  
pci_add_ext_cap_save_buffer(struct pci_dev * dev,u16 cap,unsigned int size)3494  int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3495  {
3496  	return _pci_add_cap_save_buffer(dev, cap, true, size);
3497  }
3498  
3499  /**
3500   * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3501   * @dev: the PCI device
3502   */
pci_allocate_cap_save_buffers(struct pci_dev * dev)3503  void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3504  {
3505  	int error;
3506  
3507  	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3508  					PCI_EXP_SAVE_REGS * sizeof(u16));
3509  	if (error)
3510  		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3511  
3512  	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3513  	if (error)
3514  		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3515  
3516  	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3517  					    2 * sizeof(u16));
3518  	if (error)
3519  		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3520  
3521  	pci_allocate_vc_save_buffers(dev);
3522  }
3523  
pci_free_cap_save_buffers(struct pci_dev * dev)3524  void pci_free_cap_save_buffers(struct pci_dev *dev)
3525  {
3526  	struct pci_cap_saved_state *tmp;
3527  	struct hlist_node *n;
3528  
3529  	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3530  		kfree(tmp);
3531  }
3532  
3533  /**
3534   * pci_configure_ari - enable or disable ARI forwarding
3535   * @dev: the PCI device
3536   *
3537   * If @dev and its upstream bridge both support ARI, enable ARI in the
3538   * bridge.  Otherwise, disable ARI in the bridge.
3539   */
pci_configure_ari(struct pci_dev * dev)3540  void pci_configure_ari(struct pci_dev *dev)
3541  {
3542  	u32 cap;
3543  	struct pci_dev *bridge;
3544  
3545  	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3546  		return;
3547  
3548  	bridge = dev->bus->self;
3549  	if (!bridge)
3550  		return;
3551  
3552  	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3553  	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3554  		return;
3555  
3556  	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3557  		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3558  					 PCI_EXP_DEVCTL2_ARI);
3559  		bridge->ari_enabled = 1;
3560  	} else {
3561  		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3562  					   PCI_EXP_DEVCTL2_ARI);
3563  		bridge->ari_enabled = 0;
3564  	}
3565  }
3566  
pci_acs_flags_enabled(struct pci_dev * pdev,u16 acs_flags)3567  static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3568  {
3569  	int pos;
3570  	u16 cap, ctrl;
3571  
3572  	pos = pdev->acs_cap;
3573  	if (!pos)
3574  		return false;
3575  
3576  	/*
3577  	 * Except for egress control, capabilities are either required
3578  	 * or only required if controllable.  Features missing from the
3579  	 * capability field can therefore be assumed as hard-wired enabled.
3580  	 */
3581  	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3582  	acs_flags &= (cap | PCI_ACS_EC);
3583  
3584  	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3585  	return (ctrl & acs_flags) == acs_flags;
3586  }
3587  
3588  /**
3589   * pci_acs_enabled - test ACS against required flags for a given device
3590   * @pdev: device to test
3591   * @acs_flags: required PCI ACS flags
3592   *
3593   * Return true if the device supports the provided flags.  Automatically
3594   * filters out flags that are not implemented on multifunction devices.
3595   *
3596   * Note that this interface checks the effective ACS capabilities of the
3597   * device rather than the actual capabilities.  For instance, most single
3598   * function endpoints are not required to support ACS because they have no
3599   * opportunity for peer-to-peer access.  We therefore return 'true'
3600   * regardless of whether the device exposes an ACS capability.  This makes
3601   * it much easier for callers of this function to ignore the actual type
3602   * or topology of the device when testing ACS support.
3603   */
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)3604  bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3605  {
3606  	int ret;
3607  
3608  	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3609  	if (ret >= 0)
3610  		return ret > 0;
3611  
3612  	/*
3613  	 * Conventional PCI and PCI-X devices never support ACS, either
3614  	 * effectively or actually.  The shared bus topology implies that
3615  	 * any device on the bus can receive or snoop DMA.
3616  	 */
3617  	if (!pci_is_pcie(pdev))
3618  		return false;
3619  
3620  	switch (pci_pcie_type(pdev)) {
3621  	/*
3622  	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3623  	 * but since their primary interface is PCI/X, we conservatively
3624  	 * handle them as we would a non-PCIe device.
3625  	 */
3626  	case PCI_EXP_TYPE_PCIE_BRIDGE:
3627  	/*
3628  	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3629  	 * applicable... must never implement an ACS Extended Capability...".
3630  	 * This seems arbitrary, but we take a conservative interpretation
3631  	 * of this statement.
3632  	 */
3633  	case PCI_EXP_TYPE_PCI_BRIDGE:
3634  	case PCI_EXP_TYPE_RC_EC:
3635  		return false;
3636  	/*
3637  	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3638  	 * implement ACS in order to indicate their peer-to-peer capabilities,
3639  	 * regardless of whether they are single- or multi-function devices.
3640  	 */
3641  	case PCI_EXP_TYPE_DOWNSTREAM:
3642  	case PCI_EXP_TYPE_ROOT_PORT:
3643  		return pci_acs_flags_enabled(pdev, acs_flags);
3644  	/*
3645  	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3646  	 * implemented by the remaining PCIe types to indicate peer-to-peer
3647  	 * capabilities, but only when they are part of a multifunction
3648  	 * device.  The footnote for section 6.12 indicates the specific
3649  	 * PCIe types included here.
3650  	 */
3651  	case PCI_EXP_TYPE_ENDPOINT:
3652  	case PCI_EXP_TYPE_UPSTREAM:
3653  	case PCI_EXP_TYPE_LEG_END:
3654  	case PCI_EXP_TYPE_RC_END:
3655  		if (!pdev->multifunction)
3656  			break;
3657  
3658  		return pci_acs_flags_enabled(pdev, acs_flags);
3659  	}
3660  
3661  	/*
3662  	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3663  	 * to single function devices with the exception of downstream ports.
3664  	 */
3665  	return true;
3666  }
3667  
3668  /**
3669   * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3670   * @start: starting downstream device
3671   * @end: ending upstream device or NULL to search to the root bus
3672   * @acs_flags: required flags
3673   *
3674   * Walk up a device tree from start to end testing PCI ACS support.  If
3675   * any step along the way does not support the required flags, return false.
3676   */
pci_acs_path_enabled(struct pci_dev * start,struct pci_dev * end,u16 acs_flags)3677  bool pci_acs_path_enabled(struct pci_dev *start,
3678  			  struct pci_dev *end, u16 acs_flags)
3679  {
3680  	struct pci_dev *pdev, *parent = start;
3681  
3682  	do {
3683  		pdev = parent;
3684  
3685  		if (!pci_acs_enabled(pdev, acs_flags))
3686  			return false;
3687  
3688  		if (pci_is_root_bus(pdev->bus))
3689  			return (end == NULL);
3690  
3691  		parent = pdev->bus->self;
3692  	} while (pdev != end);
3693  
3694  	return true;
3695  }
3696  
3697  /**
3698   * pci_acs_init - Initialize ACS if hardware supports it
3699   * @dev: the PCI device
3700   */
pci_acs_init(struct pci_dev * dev)3701  void pci_acs_init(struct pci_dev *dev)
3702  {
3703  	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3704  
3705  	/*
3706  	 * Attempt to enable ACS regardless of capability because some Root
3707  	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3708  	 * the standard ACS capability but still support ACS via those
3709  	 * quirks.
3710  	 */
3711  	pci_enable_acs(dev);
3712  }
3713  
3714  /**
3715   * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3716   * @pdev: PCI device
3717   * @bar: BAR to find
3718   *
3719   * Helper to find the position of the ctrl register for a BAR.
3720   * Returns -ENOTSUPP if resizable BARs are not supported at all.
3721   * Returns -ENOENT if no ctrl register for the BAR could be found.
3722   */
pci_rebar_find_pos(struct pci_dev * pdev,int bar)3723  static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3724  {
3725  	unsigned int pos, nbars, i;
3726  	u32 ctrl;
3727  
3728  	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3729  	if (!pos)
3730  		return -ENOTSUPP;
3731  
3732  	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3733  	nbars = FIELD_GET(PCI_REBAR_CTRL_NBAR_MASK, ctrl);
3734  
3735  	for (i = 0; i < nbars; i++, pos += 8) {
3736  		int bar_idx;
3737  
3738  		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3739  		bar_idx = FIELD_GET(PCI_REBAR_CTRL_BAR_IDX, ctrl);
3740  		if (bar_idx == bar)
3741  			return pos;
3742  	}
3743  
3744  	return -ENOENT;
3745  }
3746  
3747  /**
3748   * pci_rebar_get_possible_sizes - get possible sizes for BAR
3749   * @pdev: PCI device
3750   * @bar: BAR to query
3751   *
3752   * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3753   * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3754   */
pci_rebar_get_possible_sizes(struct pci_dev * pdev,int bar)3755  u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3756  {
3757  	int pos;
3758  	u32 cap;
3759  
3760  	pos = pci_rebar_find_pos(pdev, bar);
3761  	if (pos < 0)
3762  		return 0;
3763  
3764  	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3765  	cap = FIELD_GET(PCI_REBAR_CAP_SIZES, cap);
3766  
3767  	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3768  	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3769  	    bar == 0 && cap == 0x700)
3770  		return 0x3f00;
3771  
3772  	return cap;
3773  }
3774  EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3775  
3776  /**
3777   * pci_rebar_get_current_size - get the current size of a BAR
3778   * @pdev: PCI device
3779   * @bar: BAR to set size to
3780   *
3781   * Read the size of a BAR from the resizable BAR config.
3782   * Returns size if found or negative error code.
3783   */
pci_rebar_get_current_size(struct pci_dev * pdev,int bar)3784  int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3785  {
3786  	int pos;
3787  	u32 ctrl;
3788  
3789  	pos = pci_rebar_find_pos(pdev, bar);
3790  	if (pos < 0)
3791  		return pos;
3792  
3793  	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3794  	return FIELD_GET(PCI_REBAR_CTRL_BAR_SIZE, ctrl);
3795  }
3796  
3797  /**
3798   * pci_rebar_set_size - set a new size for a BAR
3799   * @pdev: PCI device
3800   * @bar: BAR to set size to
3801   * @size: new size as defined in the spec (0=1MB, 19=512GB)
3802   *
3803   * Set the new size of a BAR as defined in the spec.
3804   * Returns zero if resizing was successful, error code otherwise.
3805   */
pci_rebar_set_size(struct pci_dev * pdev,int bar,int size)3806  int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3807  {
3808  	int pos;
3809  	u32 ctrl;
3810  
3811  	pos = pci_rebar_find_pos(pdev, bar);
3812  	if (pos < 0)
3813  		return pos;
3814  
3815  	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3816  	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3817  	ctrl |= FIELD_PREP(PCI_REBAR_CTRL_BAR_SIZE, size);
3818  	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3819  	return 0;
3820  }
3821  
3822  /**
3823   * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3824   * @dev: the PCI device
3825   * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3826   *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3827   *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3828   *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3829   *
3830   * Return 0 if all upstream bridges support AtomicOp routing, egress
3831   * blocking is disabled on all upstream ports, and the root port supports
3832   * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3833   * AtomicOp completion), or negative otherwise.
3834   */
pci_enable_atomic_ops_to_root(struct pci_dev * dev,u32 cap_mask)3835  int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3836  {
3837  	struct pci_bus *bus = dev->bus;
3838  	struct pci_dev *bridge;
3839  	u32 cap, ctl2;
3840  
3841  	/*
3842  	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3843  	 * in Device Control 2 is reserved in VFs and the PF value applies
3844  	 * to all associated VFs.
3845  	 */
3846  	if (dev->is_virtfn)
3847  		return -EINVAL;
3848  
3849  	if (!pci_is_pcie(dev))
3850  		return -EINVAL;
3851  
3852  	/*
3853  	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3854  	 * AtomicOp requesters.  For now, we only support endpoints as
3855  	 * requesters and root ports as completers.  No endpoints as
3856  	 * completers, and no peer-to-peer.
3857  	 */
3858  
3859  	switch (pci_pcie_type(dev)) {
3860  	case PCI_EXP_TYPE_ENDPOINT:
3861  	case PCI_EXP_TYPE_LEG_END:
3862  	case PCI_EXP_TYPE_RC_END:
3863  		break;
3864  	default:
3865  		return -EINVAL;
3866  	}
3867  
3868  	while (bus->parent) {
3869  		bridge = bus->self;
3870  
3871  		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3872  
3873  		switch (pci_pcie_type(bridge)) {
3874  		/* Ensure switch ports support AtomicOp routing */
3875  		case PCI_EXP_TYPE_UPSTREAM:
3876  		case PCI_EXP_TYPE_DOWNSTREAM:
3877  			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3878  				return -EINVAL;
3879  			break;
3880  
3881  		/* Ensure root port supports all the sizes we care about */
3882  		case PCI_EXP_TYPE_ROOT_PORT:
3883  			if ((cap & cap_mask) != cap_mask)
3884  				return -EINVAL;
3885  			break;
3886  		}
3887  
3888  		/* Ensure upstream ports don't block AtomicOps on egress */
3889  		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3890  			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3891  						   &ctl2);
3892  			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3893  				return -EINVAL;
3894  		}
3895  
3896  		bus = bus->parent;
3897  	}
3898  
3899  	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3900  				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3901  	return 0;
3902  }
3903  EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
3904  
3905  /**
3906   * pci_release_region - Release a PCI bar
3907   * @pdev: PCI device whose resources were previously reserved by
3908   *	  pci_request_region()
3909   * @bar: BAR to release
3910   *
3911   * Releases the PCI I/O and memory resources previously reserved by a
3912   * successful call to pci_request_region().  Call this function only
3913   * after all use of the PCI regions has ceased.
3914   */
pci_release_region(struct pci_dev * pdev,int bar)3915  void pci_release_region(struct pci_dev *pdev, int bar)
3916  {
3917  	/*
3918  	 * This is done for backwards compatibility, because the old PCI devres
3919  	 * API had a mode in which the function became managed if it had been
3920  	 * enabled with pcim_enable_device() instead of pci_enable_device().
3921  	 */
3922  	if (pci_is_managed(pdev)) {
3923  		pcim_release_region(pdev, bar);
3924  		return;
3925  	}
3926  
3927  	if (pci_resource_len(pdev, bar) == 0)
3928  		return;
3929  	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3930  		release_region(pci_resource_start(pdev, bar),
3931  				pci_resource_len(pdev, bar));
3932  	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3933  		release_mem_region(pci_resource_start(pdev, bar),
3934  				pci_resource_len(pdev, bar));
3935  }
3936  EXPORT_SYMBOL(pci_release_region);
3937  
3938  /**
3939   * __pci_request_region - Reserved PCI I/O and memory resource
3940   * @pdev: PCI device whose resources are to be reserved
3941   * @bar: BAR to be reserved
3942   * @res_name: Name to be associated with resource.
3943   * @exclusive: whether the region access is exclusive or not
3944   *
3945   * Returns: 0 on success, negative error code on failure.
3946   *
3947   * Mark the PCI region associated with PCI device @pdev BAR @bar as
3948   * being reserved by owner @res_name.  Do not access any
3949   * address inside the PCI regions unless this call returns
3950   * successfully.
3951   *
3952   * If @exclusive is set, then the region is marked so that userspace
3953   * is explicitly not allowed to map the resource via /dev/mem or
3954   * sysfs MMIO access.
3955   *
3956   * Returns 0 on success, or %EBUSY on error.  A warning
3957   * message is also printed on failure.
3958   */
__pci_request_region(struct pci_dev * pdev,int bar,const char * res_name,int exclusive)3959  static int __pci_request_region(struct pci_dev *pdev, int bar,
3960  				const char *res_name, int exclusive)
3961  {
3962  	if (pci_is_managed(pdev)) {
3963  		if (exclusive == IORESOURCE_EXCLUSIVE)
3964  			return pcim_request_region_exclusive(pdev, bar, res_name);
3965  
3966  		return pcim_request_region(pdev, bar, res_name);
3967  	}
3968  
3969  	if (pci_resource_len(pdev, bar) == 0)
3970  		return 0;
3971  
3972  	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3973  		if (!request_region(pci_resource_start(pdev, bar),
3974  			    pci_resource_len(pdev, bar), res_name))
3975  			goto err_out;
3976  	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3977  		if (!__request_mem_region(pci_resource_start(pdev, bar),
3978  					pci_resource_len(pdev, bar), res_name,
3979  					exclusive))
3980  			goto err_out;
3981  	}
3982  
3983  	return 0;
3984  
3985  err_out:
3986  	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3987  		 &pdev->resource[bar]);
3988  	return -EBUSY;
3989  }
3990  
3991  /**
3992   * pci_request_region - Reserve PCI I/O and memory resource
3993   * @pdev: PCI device whose resources are to be reserved
3994   * @bar: BAR to be reserved
3995   * @res_name: Name to be associated with resource
3996   *
3997   * Returns: 0 on success, negative error code on failure.
3998   *
3999   * Mark the PCI region associated with PCI device @pdev BAR @bar as
4000   * being reserved by owner @res_name.  Do not access any
4001   * address inside the PCI regions unless this call returns
4002   * successfully.
4003   *
4004   * Returns 0 on success, or %EBUSY on error.  A warning
4005   * message is also printed on failure.
4006   *
4007   * NOTE:
4008   * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4009   * when pcim_enable_device() has been called in advance. This hybrid feature is
4010   * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4011   */
pci_request_region(struct pci_dev * pdev,int bar,const char * res_name)4012  int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4013  {
4014  	return __pci_request_region(pdev, bar, res_name, 0);
4015  }
4016  EXPORT_SYMBOL(pci_request_region);
4017  
4018  /**
4019   * pci_release_selected_regions - Release selected PCI I/O and memory resources
4020   * @pdev: PCI device whose resources were previously reserved
4021   * @bars: Bitmask of BARs to be released
4022   *
4023   * Release selected PCI I/O and memory resources previously reserved.
4024   * Call this function only after all use of the PCI regions has ceased.
4025   */
pci_release_selected_regions(struct pci_dev * pdev,int bars)4026  void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4027  {
4028  	int i;
4029  
4030  	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4031  		if (bars & (1 << i))
4032  			pci_release_region(pdev, i);
4033  }
4034  EXPORT_SYMBOL(pci_release_selected_regions);
4035  
__pci_request_selected_regions(struct pci_dev * pdev,int bars,const char * res_name,int excl)4036  static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4037  					  const char *res_name, int excl)
4038  {
4039  	int i;
4040  
4041  	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4042  		if (bars & (1 << i))
4043  			if (__pci_request_region(pdev, i, res_name, excl))
4044  				goto err_out;
4045  	return 0;
4046  
4047  err_out:
4048  	while (--i >= 0)
4049  		if (bars & (1 << i))
4050  			pci_release_region(pdev, i);
4051  
4052  	return -EBUSY;
4053  }
4054  
4055  
4056  /**
4057   * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4058   * @pdev: PCI device whose resources are to be reserved
4059   * @bars: Bitmask of BARs to be requested
4060   * @res_name: Name to be associated with resource
4061   *
4062   * Returns: 0 on success, negative error code on failure.
4063   *
4064   * NOTE:
4065   * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4066   * when pcim_enable_device() has been called in advance. This hybrid feature is
4067   * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4068   */
pci_request_selected_regions(struct pci_dev * pdev,int bars,const char * res_name)4069  int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4070  				 const char *res_name)
4071  {
4072  	return __pci_request_selected_regions(pdev, bars, res_name, 0);
4073  }
4074  EXPORT_SYMBOL(pci_request_selected_regions);
4075  
4076  /**
4077   * pci_request_selected_regions_exclusive - Request regions exclusively
4078   * @pdev: PCI device to request regions from
4079   * @bars: bit mask of BARs to request
4080   * @res_name: name to be associated with the requests
4081   *
4082   * Returns: 0 on success, negative error code on failure.
4083   *
4084   * NOTE:
4085   * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4086   * when pcim_enable_device() has been called in advance. This hybrid feature is
4087   * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4088   */
pci_request_selected_regions_exclusive(struct pci_dev * pdev,int bars,const char * res_name)4089  int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4090  					   const char *res_name)
4091  {
4092  	return __pci_request_selected_regions(pdev, bars, res_name,
4093  			IORESOURCE_EXCLUSIVE);
4094  }
4095  EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4096  
4097  /**
4098   * pci_release_regions - Release reserved PCI I/O and memory resources
4099   * @pdev: PCI device whose resources were previously reserved by
4100   *	  pci_request_regions()
4101   *
4102   * Releases all PCI I/O and memory resources previously reserved by a
4103   * successful call to pci_request_regions().  Call this function only
4104   * after all use of the PCI regions has ceased.
4105   */
pci_release_regions(struct pci_dev * pdev)4106  void pci_release_regions(struct pci_dev *pdev)
4107  {
4108  	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4109  }
4110  EXPORT_SYMBOL(pci_release_regions);
4111  
4112  /**
4113   * pci_request_regions - Reserve PCI I/O and memory resources
4114   * @pdev: PCI device whose resources are to be reserved
4115   * @res_name: Name to be associated with resource.
4116   *
4117   * Mark all PCI regions associated with PCI device @pdev as
4118   * being reserved by owner @res_name.  Do not access any
4119   * address inside the PCI regions unless this call returns
4120   * successfully.
4121   *
4122   * Returns 0 on success, or %EBUSY on error.  A warning
4123   * message is also printed on failure.
4124   *
4125   * NOTE:
4126   * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4127   * when pcim_enable_device() has been called in advance. This hybrid feature is
4128   * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4129   */
pci_request_regions(struct pci_dev * pdev,const char * res_name)4130  int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4131  {
4132  	return pci_request_selected_regions(pdev,
4133  			((1 << PCI_STD_NUM_BARS) - 1), res_name);
4134  }
4135  EXPORT_SYMBOL(pci_request_regions);
4136  
4137  /**
4138   * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4139   * @pdev: PCI device whose resources are to be reserved
4140   * @res_name: Name to be associated with resource.
4141   *
4142   * Returns: 0 on success, negative error code on failure.
4143   *
4144   * Mark all PCI regions associated with PCI device @pdev as being reserved
4145   * by owner @res_name.  Do not access any address inside the PCI regions
4146   * unless this call returns successfully.
4147   *
4148   * pci_request_regions_exclusive() will mark the region so that /dev/mem
4149   * and the sysfs MMIO access will not be allowed.
4150   *
4151   * Returns 0 on success, or %EBUSY on error.  A warning message is also
4152   * printed on failure.
4153   *
4154   * NOTE:
4155   * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4156   * when pcim_enable_device() has been called in advance. This hybrid feature is
4157   * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
4158   */
pci_request_regions_exclusive(struct pci_dev * pdev,const char * res_name)4159  int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4160  {
4161  	return pci_request_selected_regions_exclusive(pdev,
4162  				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4163  }
4164  EXPORT_SYMBOL(pci_request_regions_exclusive);
4165  
4166  /*
4167   * Record the PCI IO range (expressed as CPU physical address + size).
4168   * Return a negative value if an error has occurred, zero otherwise
4169   */
pci_register_io_range(struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)4170  int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4171  			resource_size_t	size)
4172  {
4173  	int ret = 0;
4174  #ifdef PCI_IOBASE
4175  	struct logic_pio_hwaddr *range;
4176  
4177  	if (!size || addr + size < addr)
4178  		return -EINVAL;
4179  
4180  	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4181  	if (!range)
4182  		return -ENOMEM;
4183  
4184  	range->fwnode = fwnode;
4185  	range->size = size;
4186  	range->hw_start = addr;
4187  	range->flags = LOGIC_PIO_CPU_MMIO;
4188  
4189  	ret = logic_pio_register_range(range);
4190  	if (ret)
4191  		kfree(range);
4192  
4193  	/* Ignore duplicates due to deferred probing */
4194  	if (ret == -EEXIST)
4195  		ret = 0;
4196  #endif
4197  
4198  	return ret;
4199  }
4200  
pci_pio_to_address(unsigned long pio)4201  phys_addr_t pci_pio_to_address(unsigned long pio)
4202  {
4203  #ifdef PCI_IOBASE
4204  	if (pio < MMIO_UPPER_LIMIT)
4205  		return logic_pio_to_hwaddr(pio);
4206  #endif
4207  
4208  	return (phys_addr_t) OF_BAD_ADDR;
4209  }
4210  EXPORT_SYMBOL_GPL(pci_pio_to_address);
4211  
pci_address_to_pio(phys_addr_t address)4212  unsigned long __weak pci_address_to_pio(phys_addr_t address)
4213  {
4214  #ifdef PCI_IOBASE
4215  	return logic_pio_trans_cpuaddr(address);
4216  #else
4217  	if (address > IO_SPACE_LIMIT)
4218  		return (unsigned long)-1;
4219  
4220  	return (unsigned long) address;
4221  #endif
4222  }
4223  
4224  /**
4225   * pci_remap_iospace - Remap the memory mapped I/O space
4226   * @res: Resource describing the I/O space
4227   * @phys_addr: physical address of range to be mapped
4228   *
4229   * Remap the memory mapped I/O space described by the @res and the CPU
4230   * physical address @phys_addr into virtual address space.  Only
4231   * architectures that have memory mapped IO functions defined (and the
4232   * PCI_IOBASE value defined) should call this function.
4233   */
4234  #ifndef pci_remap_iospace
pci_remap_iospace(const struct resource * res,phys_addr_t phys_addr)4235  int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4236  {
4237  #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4238  	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4239  
4240  	if (!(res->flags & IORESOURCE_IO))
4241  		return -EINVAL;
4242  
4243  	if (res->end > IO_SPACE_LIMIT)
4244  		return -EINVAL;
4245  
4246  	return vmap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4247  			       pgprot_device(PAGE_KERNEL));
4248  #else
4249  	/*
4250  	 * This architecture does not have memory mapped I/O space,
4251  	 * so this function should never be called
4252  	 */
4253  	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4254  	return -ENODEV;
4255  #endif
4256  }
4257  EXPORT_SYMBOL(pci_remap_iospace);
4258  #endif
4259  
4260  /**
4261   * pci_unmap_iospace - Unmap the memory mapped I/O space
4262   * @res: resource to be unmapped
4263   *
4264   * Unmap the CPU virtual address @res from virtual address space.  Only
4265   * architectures that have memory mapped IO functions defined (and the
4266   * PCI_IOBASE value defined) should call this function.
4267   */
pci_unmap_iospace(struct resource * res)4268  void pci_unmap_iospace(struct resource *res)
4269  {
4270  #if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4271  	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4272  
4273  	vunmap_range(vaddr, vaddr + resource_size(res));
4274  #endif
4275  }
4276  EXPORT_SYMBOL(pci_unmap_iospace);
4277  
__pci_set_master(struct pci_dev * dev,bool enable)4278  static void __pci_set_master(struct pci_dev *dev, bool enable)
4279  {
4280  	u16 old_cmd, cmd;
4281  
4282  	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4283  	if (enable)
4284  		cmd = old_cmd | PCI_COMMAND_MASTER;
4285  	else
4286  		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4287  	if (cmd != old_cmd) {
4288  		pci_dbg(dev, "%s bus mastering\n",
4289  			enable ? "enabling" : "disabling");
4290  		pci_write_config_word(dev, PCI_COMMAND, cmd);
4291  	}
4292  	dev->is_busmaster = enable;
4293  }
4294  
4295  /**
4296   * pcibios_setup - process "pci=" kernel boot arguments
4297   * @str: string used to pass in "pci=" kernel boot arguments
4298   *
4299   * Process kernel boot arguments.  This is the default implementation.
4300   * Architecture specific implementations can override this as necessary.
4301   */
pcibios_setup(char * str)4302  char * __weak __init pcibios_setup(char *str)
4303  {
4304  	return str;
4305  }
4306  
4307  /**
4308   * pcibios_set_master - enable PCI bus-mastering for device dev
4309   * @dev: the PCI device to enable
4310   *
4311   * Enables PCI bus-mastering for the device.  This is the default
4312   * implementation.  Architecture specific implementations can override
4313   * this if necessary.
4314   */
pcibios_set_master(struct pci_dev * dev)4315  void __weak pcibios_set_master(struct pci_dev *dev)
4316  {
4317  	u8 lat;
4318  
4319  	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4320  	if (pci_is_pcie(dev))
4321  		return;
4322  
4323  	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4324  	if (lat < 16)
4325  		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4326  	else if (lat > pcibios_max_latency)
4327  		lat = pcibios_max_latency;
4328  	else
4329  		return;
4330  
4331  	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4332  }
4333  
4334  /**
4335   * pci_set_master - enables bus-mastering for device dev
4336   * @dev: the PCI device to enable
4337   *
4338   * Enables bus-mastering on the device and calls pcibios_set_master()
4339   * to do the needed arch specific settings.
4340   */
pci_set_master(struct pci_dev * dev)4341  void pci_set_master(struct pci_dev *dev)
4342  {
4343  	__pci_set_master(dev, true);
4344  	pcibios_set_master(dev);
4345  }
4346  EXPORT_SYMBOL(pci_set_master);
4347  
4348  /**
4349   * pci_clear_master - disables bus-mastering for device dev
4350   * @dev: the PCI device to disable
4351   */
pci_clear_master(struct pci_dev * dev)4352  void pci_clear_master(struct pci_dev *dev)
4353  {
4354  	__pci_set_master(dev, false);
4355  }
4356  EXPORT_SYMBOL(pci_clear_master);
4357  
4358  /**
4359   * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4360   * @dev: the PCI device for which MWI is to be enabled
4361   *
4362   * Helper function for pci_set_mwi.
4363   * Originally copied from drivers/net/acenic.c.
4364   * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4365   *
4366   * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4367   */
pci_set_cacheline_size(struct pci_dev * dev)4368  int pci_set_cacheline_size(struct pci_dev *dev)
4369  {
4370  	u8 cacheline_size;
4371  
4372  	if (!pci_cache_line_size)
4373  		return -EINVAL;
4374  
4375  	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4376  	   equal to or multiple of the right value. */
4377  	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4378  	if (cacheline_size >= pci_cache_line_size &&
4379  	    (cacheline_size % pci_cache_line_size) == 0)
4380  		return 0;
4381  
4382  	/* Write the correct value. */
4383  	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4384  	/* Read it back. */
4385  	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4386  	if (cacheline_size == pci_cache_line_size)
4387  		return 0;
4388  
4389  	pci_dbg(dev, "cache line size of %d is not supported\n",
4390  		   pci_cache_line_size << 2);
4391  
4392  	return -EINVAL;
4393  }
4394  EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4395  
4396  /**
4397   * pci_set_mwi - enables memory-write-invalidate PCI transaction
4398   * @dev: the PCI device for which MWI is enabled
4399   *
4400   * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4401   *
4402   * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4403   */
pci_set_mwi(struct pci_dev * dev)4404  int pci_set_mwi(struct pci_dev *dev)
4405  {
4406  #ifdef PCI_DISABLE_MWI
4407  	return 0;
4408  #else
4409  	int rc;
4410  	u16 cmd;
4411  
4412  	rc = pci_set_cacheline_size(dev);
4413  	if (rc)
4414  		return rc;
4415  
4416  	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4417  	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4418  		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4419  		cmd |= PCI_COMMAND_INVALIDATE;
4420  		pci_write_config_word(dev, PCI_COMMAND, cmd);
4421  	}
4422  	return 0;
4423  #endif
4424  }
4425  EXPORT_SYMBOL(pci_set_mwi);
4426  
4427  /**
4428   * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4429   * @dev: the PCI device for which MWI is enabled
4430   *
4431   * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4432   * Callers are not required to check the return value.
4433   *
4434   * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4435   */
pci_try_set_mwi(struct pci_dev * dev)4436  int pci_try_set_mwi(struct pci_dev *dev)
4437  {
4438  #ifdef PCI_DISABLE_MWI
4439  	return 0;
4440  #else
4441  	return pci_set_mwi(dev);
4442  #endif
4443  }
4444  EXPORT_SYMBOL(pci_try_set_mwi);
4445  
4446  /**
4447   * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4448   * @dev: the PCI device to disable
4449   *
4450   * Disables PCI Memory-Write-Invalidate transaction on the device
4451   */
pci_clear_mwi(struct pci_dev * dev)4452  void pci_clear_mwi(struct pci_dev *dev)
4453  {
4454  #ifndef PCI_DISABLE_MWI
4455  	u16 cmd;
4456  
4457  	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4458  	if (cmd & PCI_COMMAND_INVALIDATE) {
4459  		cmd &= ~PCI_COMMAND_INVALIDATE;
4460  		pci_write_config_word(dev, PCI_COMMAND, cmd);
4461  	}
4462  #endif
4463  }
4464  EXPORT_SYMBOL(pci_clear_mwi);
4465  
4466  /**
4467   * pci_disable_parity - disable parity checking for device
4468   * @dev: the PCI device to operate on
4469   *
4470   * Disable parity checking for device @dev
4471   */
pci_disable_parity(struct pci_dev * dev)4472  void pci_disable_parity(struct pci_dev *dev)
4473  {
4474  	u16 cmd;
4475  
4476  	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4477  	if (cmd & PCI_COMMAND_PARITY) {
4478  		cmd &= ~PCI_COMMAND_PARITY;
4479  		pci_write_config_word(dev, PCI_COMMAND, cmd);
4480  	}
4481  }
4482  
4483  /**
4484   * pci_intx - enables/disables PCI INTx for device dev
4485   * @pdev: the PCI device to operate on
4486   * @enable: boolean: whether to enable or disable PCI INTx
4487   *
4488   * Enables/disables PCI INTx for device @pdev
4489   *
4490   * NOTE:
4491   * This is a "hybrid" function: It's normally unmanaged, but becomes managed
4492   * when pcim_enable_device() has been called in advance. This hybrid feature is
4493   * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
4494   */
pci_intx(struct pci_dev * pdev,int enable)4495  void pci_intx(struct pci_dev *pdev, int enable)
4496  {
4497  	u16 pci_command, new;
4498  
4499  	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4500  
4501  	if (enable)
4502  		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4503  	else
4504  		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4505  
4506  	if (new != pci_command) {
4507  		/* Preserve the "hybrid" behavior for backwards compatibility */
4508  		if (pci_is_managed(pdev)) {
4509  			WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
4510  			return;
4511  		}
4512  
4513  		pci_write_config_word(pdev, PCI_COMMAND, new);
4514  	}
4515  }
4516  EXPORT_SYMBOL_GPL(pci_intx);
4517  
4518  /**
4519   * pci_wait_for_pending_transaction - wait for pending transaction
4520   * @dev: the PCI device to operate on
4521   *
4522   * Return 0 if transaction is pending 1 otherwise.
4523   */
pci_wait_for_pending_transaction(struct pci_dev * dev)4524  int pci_wait_for_pending_transaction(struct pci_dev *dev)
4525  {
4526  	if (!pci_is_pcie(dev))
4527  		return 1;
4528  
4529  	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4530  				    PCI_EXP_DEVSTA_TRPND);
4531  }
4532  EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4533  
4534  /**
4535   * pcie_flr - initiate a PCIe function level reset
4536   * @dev: device to reset
4537   *
4538   * Initiate a function level reset unconditionally on @dev without
4539   * checking any flags and DEVCAP
4540   */
pcie_flr(struct pci_dev * dev)4541  int pcie_flr(struct pci_dev *dev)
4542  {
4543  	if (!pci_wait_for_pending_transaction(dev))
4544  		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4545  
4546  	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4547  
4548  	if (dev->imm_ready)
4549  		return 0;
4550  
4551  	/*
4552  	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4553  	 * 100ms, but may silently discard requests while the FLR is in
4554  	 * progress.  Wait 100ms before trying to access the device.
4555  	 */
4556  	msleep(100);
4557  
4558  	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4559  }
4560  EXPORT_SYMBOL_GPL(pcie_flr);
4561  
4562  /**
4563   * pcie_reset_flr - initiate a PCIe function level reset
4564   * @dev: device to reset
4565   * @probe: if true, return 0 if device can be reset this way
4566   *
4567   * Initiate a function level reset on @dev.
4568   */
pcie_reset_flr(struct pci_dev * dev,bool probe)4569  int pcie_reset_flr(struct pci_dev *dev, bool probe)
4570  {
4571  	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4572  		return -ENOTTY;
4573  
4574  	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4575  		return -ENOTTY;
4576  
4577  	if (probe)
4578  		return 0;
4579  
4580  	return pcie_flr(dev);
4581  }
4582  EXPORT_SYMBOL_GPL(pcie_reset_flr);
4583  
pci_af_flr(struct pci_dev * dev,bool probe)4584  static int pci_af_flr(struct pci_dev *dev, bool probe)
4585  {
4586  	int pos;
4587  	u8 cap;
4588  
4589  	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4590  	if (!pos)
4591  		return -ENOTTY;
4592  
4593  	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4594  		return -ENOTTY;
4595  
4596  	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4597  	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4598  		return -ENOTTY;
4599  
4600  	if (probe)
4601  		return 0;
4602  
4603  	/*
4604  	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4605  	 * is used, so we use the control offset rather than status and shift
4606  	 * the test bit to match.
4607  	 */
4608  	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4609  				 PCI_AF_STATUS_TP << 8))
4610  		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4611  
4612  	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4613  
4614  	if (dev->imm_ready)
4615  		return 0;
4616  
4617  	/*
4618  	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4619  	 * updated 27 July 2006; a device must complete an FLR within
4620  	 * 100ms, but may silently discard requests while the FLR is in
4621  	 * progress.  Wait 100ms before trying to access the device.
4622  	 */
4623  	msleep(100);
4624  
4625  	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4626  }
4627  
4628  /**
4629   * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4630   * @dev: Device to reset.
4631   * @probe: if true, return 0 if the device can be reset this way.
4632   *
4633   * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4634   * unset, it will be reinitialized internally when going from PCI_D3hot to
4635   * PCI_D0.  If that's the case and the device is not in a low-power state
4636   * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4637   *
4638   * NOTE: This causes the caller to sleep for twice the device power transition
4639   * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4640   * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4641   * Moreover, only devices in D0 can be reset by this function.
4642   */
pci_pm_reset(struct pci_dev * dev,bool probe)4643  static int pci_pm_reset(struct pci_dev *dev, bool probe)
4644  {
4645  	u16 csr;
4646  
4647  	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4648  		return -ENOTTY;
4649  
4650  	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4651  	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4652  		return -ENOTTY;
4653  
4654  	if (probe)
4655  		return 0;
4656  
4657  	if (dev->current_state != PCI_D0)
4658  		return -EINVAL;
4659  
4660  	csr &= ~PCI_PM_CTRL_STATE_MASK;
4661  	csr |= PCI_D3hot;
4662  	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4663  	pci_dev_d3_sleep(dev);
4664  
4665  	csr &= ~PCI_PM_CTRL_STATE_MASK;
4666  	csr |= PCI_D0;
4667  	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4668  	pci_dev_d3_sleep(dev);
4669  
4670  	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4671  }
4672  
4673  /**
4674   * pcie_wait_for_link_status - Wait for link status change
4675   * @pdev: Device whose link to wait for.
4676   * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE.
4677   * @active: Waiting for active or inactive?
4678   *
4679   * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4680   * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4681   */
pcie_wait_for_link_status(struct pci_dev * pdev,bool use_lt,bool active)4682  static int pcie_wait_for_link_status(struct pci_dev *pdev,
4683  				     bool use_lt, bool active)
4684  {
4685  	u16 lnksta_mask, lnksta_match;
4686  	unsigned long end_jiffies;
4687  	u16 lnksta;
4688  
4689  	lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA;
4690  	lnksta_match = active ? lnksta_mask : 0;
4691  
4692  	end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS);
4693  	do {
4694  		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
4695  		if ((lnksta & lnksta_mask) == lnksta_match)
4696  			return 0;
4697  		msleep(1);
4698  	} while (time_before(jiffies, end_jiffies));
4699  
4700  	return -ETIMEDOUT;
4701  }
4702  
4703  /**
4704   * pcie_retrain_link - Request a link retrain and wait for it to complete
4705   * @pdev: Device whose link to retrain.
4706   * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status.
4707   *
4708   * Retrain completion status is retrieved from the Link Status Register
4709   * according to @use_lt.  It is not verified whether the use of the DLLLA
4710   * bit is valid.
4711   *
4712   * Return 0 if successful, or -ETIMEDOUT if training has not completed
4713   * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds.
4714   */
pcie_retrain_link(struct pci_dev * pdev,bool use_lt)4715  int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
4716  {
4717  	int rc;
4718  
4719  	/*
4720  	 * Ensure the updated LNKCTL parameters are used during link
4721  	 * training by checking that there is no ongoing link training that
4722  	 * may have started before link parameters were changed, so as to
4723  	 * avoid LTSSM race as recommended in Implementation Note at the end
4724  	 * of PCIe r6.1 sec 7.5.3.7.
4725  	 */
4726  	rc = pcie_wait_for_link_status(pdev, true, false);
4727  	if (rc)
4728  		return rc;
4729  
4730  	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4731  	if (pdev->clear_retrain_link) {
4732  		/*
4733  		 * Due to an erratum in some devices the Retrain Link bit
4734  		 * needs to be cleared again manually to allow the link
4735  		 * training to succeed.
4736  		 */
4737  		pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL);
4738  	}
4739  
4740  	rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
4741  
4742  	/*
4743  	 * Clear LBMS after a manual retrain so that the bit can be used
4744  	 * to track link speed or width changes made by hardware itself
4745  	 * in attempt to correct unreliable link operation.
4746  	 */
4747  	pcie_capability_write_word(pdev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
4748  	return rc;
4749  }
4750  
4751  /**
4752   * pcie_wait_for_link_delay - Wait until link is active or inactive
4753   * @pdev: Bridge device
4754   * @active: waiting for active or inactive?
4755   * @delay: Delay to wait after link has become active (in ms)
4756   *
4757   * Use this to wait till link becomes active or inactive.
4758   */
pcie_wait_for_link_delay(struct pci_dev * pdev,bool active,int delay)4759  static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4760  				     int delay)
4761  {
4762  	int rc;
4763  
4764  	/*
4765  	 * Some controllers might not implement link active reporting. In this
4766  	 * case, we wait for 1000 ms + any delay requested by the caller.
4767  	 */
4768  	if (!pdev->link_active_reporting) {
4769  		msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay);
4770  		return true;
4771  	}
4772  
4773  	/*
4774  	 * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4775  	 * after which we should expect an link active if the reset was
4776  	 * successful. If so, software must wait a minimum 100ms before sending
4777  	 * configuration requests to devices downstream this port.
4778  	 *
4779  	 * If the link fails to activate, either the device was physically
4780  	 * removed or the link is permanently failed.
4781  	 */
4782  	if (active)
4783  		msleep(20);
4784  	rc = pcie_wait_for_link_status(pdev, false, active);
4785  	if (active) {
4786  		if (rc)
4787  			rc = pcie_failed_link_retrain(pdev);
4788  		if (rc)
4789  			return false;
4790  
4791  		msleep(delay);
4792  		return true;
4793  	}
4794  
4795  	if (rc)
4796  		return false;
4797  
4798  	return true;
4799  }
4800  
4801  /**
4802   * pcie_wait_for_link - Wait until link is active or inactive
4803   * @pdev: Bridge device
4804   * @active: waiting for active or inactive?
4805   *
4806   * Use this to wait till link becomes active or inactive.
4807   */
pcie_wait_for_link(struct pci_dev * pdev,bool active)4808  bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4809  {
4810  	return pcie_wait_for_link_delay(pdev, active, 100);
4811  }
4812  
4813  /*
4814   * Find maximum D3cold delay required by all the devices on the bus.  The
4815   * spec says 100 ms, but firmware can lower it and we allow drivers to
4816   * increase it as well.
4817   *
4818   * Called with @pci_bus_sem locked for reading.
4819   */
pci_bus_max_d3cold_delay(const struct pci_bus * bus)4820  static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4821  {
4822  	const struct pci_dev *pdev;
4823  	int min_delay = 100;
4824  	int max_delay = 0;
4825  
4826  	list_for_each_entry(pdev, &bus->devices, bus_list) {
4827  		if (pdev->d3cold_delay < min_delay)
4828  			min_delay = pdev->d3cold_delay;
4829  		if (pdev->d3cold_delay > max_delay)
4830  			max_delay = pdev->d3cold_delay;
4831  	}
4832  
4833  	return max(min_delay, max_delay);
4834  }
4835  
4836  /**
4837   * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4838   * @dev: PCI bridge
4839   * @reset_type: reset type in human-readable form
4840   *
4841   * Handle necessary delays before access to the devices on the secondary
4842   * side of the bridge are permitted after D3cold to D0 transition
4843   * or Conventional Reset.
4844   *
4845   * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4846   * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4847   * 4.3.2.
4848   *
4849   * Return 0 on success or -ENOTTY if the first device on the secondary bus
4850   * failed to become accessible.
4851   */
pci_bridge_wait_for_secondary_bus(struct pci_dev * dev,char * reset_type)4852  int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type)
4853  {
4854  	struct pci_dev *child __free(pci_dev_put) = NULL;
4855  	int delay;
4856  
4857  	if (pci_dev_is_disconnected(dev))
4858  		return 0;
4859  
4860  	if (!pci_is_bridge(dev))
4861  		return 0;
4862  
4863  	down_read(&pci_bus_sem);
4864  
4865  	/*
4866  	 * We only deal with devices that are present currently on the bus.
4867  	 * For any hot-added devices the access delay is handled in pciehp
4868  	 * board_added(). In case of ACPI hotplug the firmware is expected
4869  	 * to configure the devices before OS is notified.
4870  	 */
4871  	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4872  		up_read(&pci_bus_sem);
4873  		return 0;
4874  	}
4875  
4876  	/* Take d3cold_delay requirements into account */
4877  	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4878  	if (!delay) {
4879  		up_read(&pci_bus_sem);
4880  		return 0;
4881  	}
4882  
4883  	child = pci_dev_get(list_first_entry(&dev->subordinate->devices,
4884  					     struct pci_dev, bus_list));
4885  	up_read(&pci_bus_sem);
4886  
4887  	/*
4888  	 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4889  	 * accessing the device after reset (that is 1000 ms + 100 ms).
4890  	 */
4891  	if (!pci_is_pcie(dev)) {
4892  		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4893  		msleep(1000 + delay);
4894  		return 0;
4895  	}
4896  
4897  	/*
4898  	 * For PCIe downstream and root ports that do not support speeds
4899  	 * greater than 5 GT/s need to wait minimum 100 ms. For higher
4900  	 * speeds (gen3) we need to wait first for the data link layer to
4901  	 * become active.
4902  	 *
4903  	 * However, 100 ms is the minimum and the PCIe spec says the
4904  	 * software must allow at least 1s before it can determine that the
4905  	 * device that did not respond is a broken device. Also device can
4906  	 * take longer than that to respond if it indicates so through Request
4907  	 * Retry Status completions.
4908  	 *
4909  	 * Therefore we wait for 100 ms and check for the device presence
4910  	 * until the timeout expires.
4911  	 */
4912  	if (!pcie_downstream_port(dev))
4913  		return 0;
4914  
4915  	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4916  		u16 status;
4917  
4918  		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4919  		msleep(delay);
4920  
4921  		if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay))
4922  			return 0;
4923  
4924  		/*
4925  		 * If the port supports active link reporting we now check
4926  		 * whether the link is active and if not bail out early with
4927  		 * the assumption that the device is not present anymore.
4928  		 */
4929  		if (!dev->link_active_reporting)
4930  			return -ENOTTY;
4931  
4932  		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status);
4933  		if (!(status & PCI_EXP_LNKSTA_DLLLA))
4934  			return -ENOTTY;
4935  
4936  		return pci_dev_wait(child, reset_type,
4937  				    PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT);
4938  	}
4939  
4940  	pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4941  		delay);
4942  	if (!pcie_wait_for_link_delay(dev, true, delay)) {
4943  		/* Did not train, no need to wait any further */
4944  		pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4945  		return -ENOTTY;
4946  	}
4947  
4948  	return pci_dev_wait(child, reset_type,
4949  			    PCIE_RESET_READY_POLL_MS - delay);
4950  }
4951  
pci_reset_secondary_bus(struct pci_dev * dev)4952  void pci_reset_secondary_bus(struct pci_dev *dev)
4953  {
4954  	u16 ctrl;
4955  
4956  	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4957  	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4958  	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4959  
4960  	/*
4961  	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4962  	 * this to 2ms to ensure that we meet the minimum requirement.
4963  	 */
4964  	msleep(2);
4965  
4966  	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4967  	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4968  }
4969  
pcibios_reset_secondary_bus(struct pci_dev * dev)4970  void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4971  {
4972  	pci_reset_secondary_bus(dev);
4973  }
4974  
4975  /**
4976   * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4977   * @dev: Bridge device
4978   *
4979   * Use the bridge control register to assert reset on the secondary bus.
4980   * Devices on the secondary bus are left in power-on state.
4981   */
pci_bridge_secondary_bus_reset(struct pci_dev * dev)4982  int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4983  {
4984  	if (!dev->block_cfg_access)
4985  		pci_warn_once(dev, "unlocked secondary bus reset via: %pS\n",
4986  			      __builtin_return_address(0));
4987  	pcibios_reset_secondary_bus(dev);
4988  
4989  	return pci_bridge_wait_for_secondary_bus(dev, "bus reset");
4990  }
4991  EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4992  
pci_parent_bus_reset(struct pci_dev * dev,bool probe)4993  static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
4994  {
4995  	struct pci_dev *pdev;
4996  
4997  	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4998  	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4999  		return -ENOTTY;
5000  
5001  	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5002  		if (pdev != dev)
5003  			return -ENOTTY;
5004  
5005  	if (probe)
5006  		return 0;
5007  
5008  	return pci_bridge_secondary_bus_reset(dev->bus->self);
5009  }
5010  
pci_reset_hotplug_slot(struct hotplug_slot * hotplug,bool probe)5011  static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5012  {
5013  	int rc = -ENOTTY;
5014  
5015  	if (!hotplug || !try_module_get(hotplug->owner))
5016  		return rc;
5017  
5018  	if (hotplug->ops->reset_slot)
5019  		rc = hotplug->ops->reset_slot(hotplug, probe);
5020  
5021  	module_put(hotplug->owner);
5022  
5023  	return rc;
5024  }
5025  
pci_dev_reset_slot_function(struct pci_dev * dev,bool probe)5026  static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5027  {
5028  	if (dev->multifunction || dev->subordinate || !dev->slot ||
5029  	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5030  		return -ENOTTY;
5031  
5032  	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5033  }
5034  
cxl_port_dvsec(struct pci_dev * dev)5035  static u16 cxl_port_dvsec(struct pci_dev *dev)
5036  {
5037  	return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_CXL,
5038  					 PCI_DVSEC_CXL_PORT);
5039  }
5040  
cxl_sbr_masked(struct pci_dev * dev)5041  static bool cxl_sbr_masked(struct pci_dev *dev)
5042  {
5043  	u16 dvsec, reg;
5044  	int rc;
5045  
5046  	dvsec = cxl_port_dvsec(dev);
5047  	if (!dvsec)
5048  		return false;
5049  
5050  	rc = pci_read_config_word(dev, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5051  	if (rc || PCI_POSSIBLE_ERROR(reg))
5052  		return false;
5053  
5054  	/*
5055  	 * Per CXL spec r3.1, sec 8.1.5.2, when "Unmask SBR" is 0, the SBR
5056  	 * bit in Bridge Control has no effect.  When 1, the Port generates
5057  	 * hot reset when the SBR bit is set to 1.
5058  	 */
5059  	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR)
5060  		return false;
5061  
5062  	return true;
5063  }
5064  
pci_reset_bus_function(struct pci_dev * dev,bool probe)5065  static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5066  {
5067  	struct pci_dev *bridge = pci_upstream_bridge(dev);
5068  	int rc;
5069  
5070  	/*
5071  	 * If "dev" is below a CXL port that has SBR control masked, SBR
5072  	 * won't do anything, so return error.
5073  	 */
5074  	if (bridge && cxl_sbr_masked(bridge)) {
5075  		if (probe)
5076  			return 0;
5077  
5078  		return -ENOTTY;
5079  	}
5080  
5081  	rc = pci_dev_reset_slot_function(dev, probe);
5082  	if (rc != -ENOTTY)
5083  		return rc;
5084  	return pci_parent_bus_reset(dev, probe);
5085  }
5086  
cxl_reset_bus_function(struct pci_dev * dev,bool probe)5087  static int cxl_reset_bus_function(struct pci_dev *dev, bool probe)
5088  {
5089  	struct pci_dev *bridge;
5090  	u16 dvsec, reg, val;
5091  	int rc;
5092  
5093  	bridge = pci_upstream_bridge(dev);
5094  	if (!bridge)
5095  		return -ENOTTY;
5096  
5097  	dvsec = cxl_port_dvsec(bridge);
5098  	if (!dvsec)
5099  		return -ENOTTY;
5100  
5101  	if (probe)
5102  		return 0;
5103  
5104  	rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg);
5105  	if (rc)
5106  		return -ENOTTY;
5107  
5108  	if (reg & PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR) {
5109  		val = reg;
5110  	} else {
5111  		val = reg | PCI_DVSEC_CXL_PORT_CTL_UNMASK_SBR;
5112  		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5113  				      val);
5114  	}
5115  
5116  	rc = pci_reset_bus_function(dev, probe);
5117  
5118  	if (reg != val)
5119  		pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL,
5120  				      reg);
5121  
5122  	return rc;
5123  }
5124  
pci_dev_lock(struct pci_dev * dev)5125  void pci_dev_lock(struct pci_dev *dev)
5126  {
5127  	/* block PM suspend, driver probe, etc. */
5128  	device_lock(&dev->dev);
5129  	pci_cfg_access_lock(dev);
5130  }
5131  EXPORT_SYMBOL_GPL(pci_dev_lock);
5132  
5133  /* Return 1 on successful lock, 0 on contention */
pci_dev_trylock(struct pci_dev * dev)5134  int pci_dev_trylock(struct pci_dev *dev)
5135  {
5136  	if (device_trylock(&dev->dev)) {
5137  		if (pci_cfg_access_trylock(dev))
5138  			return 1;
5139  		device_unlock(&dev->dev);
5140  	}
5141  
5142  	return 0;
5143  }
5144  EXPORT_SYMBOL_GPL(pci_dev_trylock);
5145  
pci_dev_unlock(struct pci_dev * dev)5146  void pci_dev_unlock(struct pci_dev *dev)
5147  {
5148  	pci_cfg_access_unlock(dev);
5149  	device_unlock(&dev->dev);
5150  }
5151  EXPORT_SYMBOL_GPL(pci_dev_unlock);
5152  
pci_dev_save_and_disable(struct pci_dev * dev)5153  static void pci_dev_save_and_disable(struct pci_dev *dev)
5154  {
5155  	const struct pci_error_handlers *err_handler =
5156  			dev->driver ? dev->driver->err_handler : NULL;
5157  
5158  	/*
5159  	 * dev->driver->err_handler->reset_prepare() is protected against
5160  	 * races with ->remove() by the device lock, which must be held by
5161  	 * the caller.
5162  	 */
5163  	if (err_handler && err_handler->reset_prepare)
5164  		err_handler->reset_prepare(dev);
5165  
5166  	/*
5167  	 * Wake-up device prior to save.  PM registers default to D0 after
5168  	 * reset and a simple register restore doesn't reliably return
5169  	 * to a non-D0 state anyway.
5170  	 */
5171  	pci_set_power_state(dev, PCI_D0);
5172  
5173  	pci_save_state(dev);
5174  	/*
5175  	 * Disable the device by clearing the Command register, except for
5176  	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5177  	 * BARs, but also prevents the device from being Bus Master, preventing
5178  	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5179  	 * compliant devices, INTx-disable prevents legacy interrupts.
5180  	 */
5181  	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5182  }
5183  
pci_dev_restore(struct pci_dev * dev)5184  static void pci_dev_restore(struct pci_dev *dev)
5185  {
5186  	const struct pci_error_handlers *err_handler =
5187  			dev->driver ? dev->driver->err_handler : NULL;
5188  
5189  	pci_restore_state(dev);
5190  
5191  	/*
5192  	 * dev->driver->err_handler->reset_done() is protected against
5193  	 * races with ->remove() by the device lock, which must be held by
5194  	 * the caller.
5195  	 */
5196  	if (err_handler && err_handler->reset_done)
5197  		err_handler->reset_done(dev);
5198  }
5199  
5200  /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5201  static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5202  	{ },
5203  	{ pci_dev_specific_reset, .name = "device_specific" },
5204  	{ pci_dev_acpi_reset, .name = "acpi" },
5205  	{ pcie_reset_flr, .name = "flr" },
5206  	{ pci_af_flr, .name = "af_flr" },
5207  	{ pci_pm_reset, .name = "pm" },
5208  	{ pci_reset_bus_function, .name = "bus" },
5209  	{ cxl_reset_bus_function, .name = "cxl_bus" },
5210  };
5211  
reset_method_show(struct device * dev,struct device_attribute * attr,char * buf)5212  static ssize_t reset_method_show(struct device *dev,
5213  				 struct device_attribute *attr, char *buf)
5214  {
5215  	struct pci_dev *pdev = to_pci_dev(dev);
5216  	ssize_t len = 0;
5217  	int i, m;
5218  
5219  	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5220  		m = pdev->reset_methods[i];
5221  		if (!m)
5222  			break;
5223  
5224  		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5225  				     pci_reset_fn_methods[m].name);
5226  	}
5227  
5228  	if (len)
5229  		len += sysfs_emit_at(buf, len, "\n");
5230  
5231  	return len;
5232  }
5233  
reset_method_lookup(const char * name)5234  static int reset_method_lookup(const char *name)
5235  {
5236  	int m;
5237  
5238  	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5239  		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5240  			return m;
5241  	}
5242  
5243  	return 0;	/* not found */
5244  }
5245  
reset_method_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)5246  static ssize_t reset_method_store(struct device *dev,
5247  				  struct device_attribute *attr,
5248  				  const char *buf, size_t count)
5249  {
5250  	struct pci_dev *pdev = to_pci_dev(dev);
5251  	char *options, *name;
5252  	int m, n;
5253  	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5254  
5255  	if (sysfs_streq(buf, "")) {
5256  		pdev->reset_methods[0] = 0;
5257  		pci_warn(pdev, "All device reset methods disabled by user");
5258  		return count;
5259  	}
5260  
5261  	if (sysfs_streq(buf, "default")) {
5262  		pci_init_reset_methods(pdev);
5263  		return count;
5264  	}
5265  
5266  	options = kstrndup(buf, count, GFP_KERNEL);
5267  	if (!options)
5268  		return -ENOMEM;
5269  
5270  	n = 0;
5271  	while ((name = strsep(&options, " ")) != NULL) {
5272  		if (sysfs_streq(name, ""))
5273  			continue;
5274  
5275  		name = strim(name);
5276  
5277  		m = reset_method_lookup(name);
5278  		if (!m) {
5279  			pci_err(pdev, "Invalid reset method '%s'", name);
5280  			goto error;
5281  		}
5282  
5283  		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5284  			pci_err(pdev, "Unsupported reset method '%s'", name);
5285  			goto error;
5286  		}
5287  
5288  		if (n == PCI_NUM_RESET_METHODS - 1) {
5289  			pci_err(pdev, "Too many reset methods\n");
5290  			goto error;
5291  		}
5292  
5293  		reset_methods[n++] = m;
5294  	}
5295  
5296  	reset_methods[n] = 0;
5297  
5298  	/* Warn if dev-specific supported but not highest priority */
5299  	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5300  	    reset_methods[0] != 1)
5301  		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5302  	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5303  	kfree(options);
5304  	return count;
5305  
5306  error:
5307  	/* Leave previous methods unchanged */
5308  	kfree(options);
5309  	return -EINVAL;
5310  }
5311  static DEVICE_ATTR_RW(reset_method);
5312  
5313  static struct attribute *pci_dev_reset_method_attrs[] = {
5314  	&dev_attr_reset_method.attr,
5315  	NULL,
5316  };
5317  
pci_dev_reset_method_attr_is_visible(struct kobject * kobj,struct attribute * a,int n)5318  static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5319  						    struct attribute *a, int n)
5320  {
5321  	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5322  
5323  	if (!pci_reset_supported(pdev))
5324  		return 0;
5325  
5326  	return a->mode;
5327  }
5328  
5329  const struct attribute_group pci_dev_reset_method_attr_group = {
5330  	.attrs = pci_dev_reset_method_attrs,
5331  	.is_visible = pci_dev_reset_method_attr_is_visible,
5332  };
5333  
5334  /**
5335   * __pci_reset_function_locked - reset a PCI device function while holding
5336   * the @dev mutex lock.
5337   * @dev: PCI device to reset
5338   *
5339   * Some devices allow an individual function to be reset without affecting
5340   * other functions in the same device.  The PCI device must be responsive
5341   * to PCI config space in order to use this function.
5342   *
5343   * The device function is presumed to be unused and the caller is holding
5344   * the device mutex lock when this function is called.
5345   *
5346   * Resetting the device will make the contents of PCI configuration space
5347   * random, so any caller of this must be prepared to reinitialise the
5348   * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5349   * etc.
5350   *
5351   * Returns 0 if the device function was successfully reset or negative if the
5352   * device doesn't support resetting a single function.
5353   */
__pci_reset_function_locked(struct pci_dev * dev)5354  int __pci_reset_function_locked(struct pci_dev *dev)
5355  {
5356  	int i, m, rc;
5357  
5358  	might_sleep();
5359  
5360  	/*
5361  	 * A reset method returns -ENOTTY if it doesn't support this device and
5362  	 * we should try the next method.
5363  	 *
5364  	 * If it returns 0 (success), we're finished.  If it returns any other
5365  	 * error, we're also finished: this indicates that further reset
5366  	 * mechanisms might be broken on the device.
5367  	 */
5368  	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5369  		m = dev->reset_methods[i];
5370  		if (!m)
5371  			return -ENOTTY;
5372  
5373  		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5374  		if (!rc)
5375  			return 0;
5376  		if (rc != -ENOTTY)
5377  			return rc;
5378  	}
5379  
5380  	return -ENOTTY;
5381  }
5382  EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5383  
5384  /**
5385   * pci_init_reset_methods - check whether device can be safely reset
5386   * and store supported reset mechanisms.
5387   * @dev: PCI device to check for reset mechanisms
5388   *
5389   * Some devices allow an individual function to be reset without affecting
5390   * other functions in the same device.  The PCI device must be in D0-D3hot
5391   * state.
5392   *
5393   * Stores reset mechanisms supported by device in reset_methods byte array
5394   * which is a member of struct pci_dev.
5395   */
pci_init_reset_methods(struct pci_dev * dev)5396  void pci_init_reset_methods(struct pci_dev *dev)
5397  {
5398  	int m, i, rc;
5399  
5400  	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5401  
5402  	might_sleep();
5403  
5404  	i = 0;
5405  	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5406  		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5407  		if (!rc)
5408  			dev->reset_methods[i++] = m;
5409  		else if (rc != -ENOTTY)
5410  			break;
5411  	}
5412  
5413  	dev->reset_methods[i] = 0;
5414  }
5415  
5416  /**
5417   * pci_reset_function - quiesce and reset a PCI device function
5418   * @dev: PCI device to reset
5419   *
5420   * Some devices allow an individual function to be reset without affecting
5421   * other functions in the same device.  The PCI device must be responsive
5422   * to PCI config space in order to use this function.
5423   *
5424   * This function does not just reset the PCI portion of a device, but
5425   * clears all the state associated with the device.  This function differs
5426   * from __pci_reset_function_locked() in that it saves and restores device state
5427   * over the reset and takes the PCI device lock.
5428   *
5429   * Returns 0 if the device function was successfully reset or negative if the
5430   * device doesn't support resetting a single function.
5431   */
pci_reset_function(struct pci_dev * dev)5432  int pci_reset_function(struct pci_dev *dev)
5433  {
5434  	struct pci_dev *bridge;
5435  	int rc;
5436  
5437  	if (!pci_reset_supported(dev))
5438  		return -ENOTTY;
5439  
5440  	/*
5441  	 * If there's no upstream bridge, no locking is needed since there is
5442  	 * no upstream bridge configuration to hold consistent.
5443  	 */
5444  	bridge = pci_upstream_bridge(dev);
5445  	if (bridge)
5446  		pci_dev_lock(bridge);
5447  
5448  	pci_dev_lock(dev);
5449  	pci_dev_save_and_disable(dev);
5450  
5451  	rc = __pci_reset_function_locked(dev);
5452  
5453  	pci_dev_restore(dev);
5454  	pci_dev_unlock(dev);
5455  
5456  	if (bridge)
5457  		pci_dev_unlock(bridge);
5458  
5459  	return rc;
5460  }
5461  EXPORT_SYMBOL_GPL(pci_reset_function);
5462  
5463  /**
5464   * pci_reset_function_locked - quiesce and reset a PCI device function
5465   * @dev: PCI device to reset
5466   *
5467   * Some devices allow an individual function to be reset without affecting
5468   * other functions in the same device.  The PCI device must be responsive
5469   * to PCI config space in order to use this function.
5470   *
5471   * This function does not just reset the PCI portion of a device, but
5472   * clears all the state associated with the device.  This function differs
5473   * from __pci_reset_function_locked() in that it saves and restores device state
5474   * over the reset.  It also differs from pci_reset_function() in that it
5475   * requires the PCI device lock to be held.
5476   *
5477   * Returns 0 if the device function was successfully reset or negative if the
5478   * device doesn't support resetting a single function.
5479   */
pci_reset_function_locked(struct pci_dev * dev)5480  int pci_reset_function_locked(struct pci_dev *dev)
5481  {
5482  	int rc;
5483  
5484  	if (!pci_reset_supported(dev))
5485  		return -ENOTTY;
5486  
5487  	pci_dev_save_and_disable(dev);
5488  
5489  	rc = __pci_reset_function_locked(dev);
5490  
5491  	pci_dev_restore(dev);
5492  
5493  	return rc;
5494  }
5495  EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5496  
5497  /**
5498   * pci_try_reset_function - quiesce and reset a PCI device function
5499   * @dev: PCI device to reset
5500   *
5501   * Same as above, except return -EAGAIN if unable to lock device.
5502   */
pci_try_reset_function(struct pci_dev * dev)5503  int pci_try_reset_function(struct pci_dev *dev)
5504  {
5505  	int rc;
5506  
5507  	if (!pci_reset_supported(dev))
5508  		return -ENOTTY;
5509  
5510  	if (!pci_dev_trylock(dev))
5511  		return -EAGAIN;
5512  
5513  	pci_dev_save_and_disable(dev);
5514  	rc = __pci_reset_function_locked(dev);
5515  	pci_dev_restore(dev);
5516  	pci_dev_unlock(dev);
5517  
5518  	return rc;
5519  }
5520  EXPORT_SYMBOL_GPL(pci_try_reset_function);
5521  
5522  /* Do any devices on or below this bus prevent a bus reset? */
pci_bus_resettable(struct pci_bus * bus)5523  static bool pci_bus_resettable(struct pci_bus *bus)
5524  {
5525  	struct pci_dev *dev;
5526  
5527  
5528  	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5529  		return false;
5530  
5531  	list_for_each_entry(dev, &bus->devices, bus_list) {
5532  		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5533  		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5534  			return false;
5535  	}
5536  
5537  	return true;
5538  }
5539  
5540  /* Lock devices from the top of the tree down */
pci_bus_lock(struct pci_bus * bus)5541  static void pci_bus_lock(struct pci_bus *bus)
5542  {
5543  	struct pci_dev *dev;
5544  
5545  	pci_dev_lock(bus->self);
5546  	list_for_each_entry(dev, &bus->devices, bus_list) {
5547  		if (dev->subordinate)
5548  			pci_bus_lock(dev->subordinate);
5549  		else
5550  			pci_dev_lock(dev);
5551  	}
5552  }
5553  
5554  /* Unlock devices from the bottom of the tree up */
pci_bus_unlock(struct pci_bus * bus)5555  static void pci_bus_unlock(struct pci_bus *bus)
5556  {
5557  	struct pci_dev *dev;
5558  
5559  	list_for_each_entry(dev, &bus->devices, bus_list) {
5560  		if (dev->subordinate)
5561  			pci_bus_unlock(dev->subordinate);
5562  		else
5563  			pci_dev_unlock(dev);
5564  	}
5565  	pci_dev_unlock(bus->self);
5566  }
5567  
5568  /* Return 1 on successful lock, 0 on contention */
pci_bus_trylock(struct pci_bus * bus)5569  static int pci_bus_trylock(struct pci_bus *bus)
5570  {
5571  	struct pci_dev *dev;
5572  
5573  	if (!pci_dev_trylock(bus->self))
5574  		return 0;
5575  
5576  	list_for_each_entry(dev, &bus->devices, bus_list) {
5577  		if (dev->subordinate) {
5578  			if (!pci_bus_trylock(dev->subordinate))
5579  				goto unlock;
5580  		} else if (!pci_dev_trylock(dev))
5581  			goto unlock;
5582  	}
5583  	return 1;
5584  
5585  unlock:
5586  	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5587  		if (dev->subordinate)
5588  			pci_bus_unlock(dev->subordinate);
5589  		else
5590  			pci_dev_unlock(dev);
5591  	}
5592  	pci_dev_unlock(bus->self);
5593  	return 0;
5594  }
5595  
5596  /* Do any devices on or below this slot prevent a bus reset? */
pci_slot_resettable(struct pci_slot * slot)5597  static bool pci_slot_resettable(struct pci_slot *slot)
5598  {
5599  	struct pci_dev *dev;
5600  
5601  	if (slot->bus->self &&
5602  	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5603  		return false;
5604  
5605  	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5606  		if (!dev->slot || dev->slot != slot)
5607  			continue;
5608  		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5609  		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5610  			return false;
5611  	}
5612  
5613  	return true;
5614  }
5615  
5616  /* Lock devices from the top of the tree down */
pci_slot_lock(struct pci_slot * slot)5617  static void pci_slot_lock(struct pci_slot *slot)
5618  {
5619  	struct pci_dev *dev;
5620  
5621  	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5622  		if (!dev->slot || dev->slot != slot)
5623  			continue;
5624  		if (dev->subordinate)
5625  			pci_bus_lock(dev->subordinate);
5626  		else
5627  			pci_dev_lock(dev);
5628  	}
5629  }
5630  
5631  /* Unlock devices from the bottom of the tree up */
pci_slot_unlock(struct pci_slot * slot)5632  static void pci_slot_unlock(struct pci_slot *slot)
5633  {
5634  	struct pci_dev *dev;
5635  
5636  	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5637  		if (!dev->slot || dev->slot != slot)
5638  			continue;
5639  		if (dev->subordinate)
5640  			pci_bus_unlock(dev->subordinate);
5641  		pci_dev_unlock(dev);
5642  	}
5643  }
5644  
5645  /* Return 1 on successful lock, 0 on contention */
pci_slot_trylock(struct pci_slot * slot)5646  static int pci_slot_trylock(struct pci_slot *slot)
5647  {
5648  	struct pci_dev *dev;
5649  
5650  	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5651  		if (!dev->slot || dev->slot != slot)
5652  			continue;
5653  		if (dev->subordinate) {
5654  			if (!pci_bus_trylock(dev->subordinate)) {
5655  				pci_dev_unlock(dev);
5656  				goto unlock;
5657  			}
5658  		} else if (!pci_dev_trylock(dev))
5659  			goto unlock;
5660  	}
5661  	return 1;
5662  
5663  unlock:
5664  	list_for_each_entry_continue_reverse(dev,
5665  					     &slot->bus->devices, bus_list) {
5666  		if (!dev->slot || dev->slot != slot)
5667  			continue;
5668  		if (dev->subordinate)
5669  			pci_bus_unlock(dev->subordinate);
5670  		else
5671  			pci_dev_unlock(dev);
5672  	}
5673  	return 0;
5674  }
5675  
5676  /*
5677   * Save and disable devices from the top of the tree down while holding
5678   * the @dev mutex lock for the entire tree.
5679   */
pci_bus_save_and_disable_locked(struct pci_bus * bus)5680  static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5681  {
5682  	struct pci_dev *dev;
5683  
5684  	list_for_each_entry(dev, &bus->devices, bus_list) {
5685  		pci_dev_save_and_disable(dev);
5686  		if (dev->subordinate)
5687  			pci_bus_save_and_disable_locked(dev->subordinate);
5688  	}
5689  }
5690  
5691  /*
5692   * Restore devices from top of the tree down while holding @dev mutex lock
5693   * for the entire tree.  Parent bridges need to be restored before we can
5694   * get to subordinate devices.
5695   */
pci_bus_restore_locked(struct pci_bus * bus)5696  static void pci_bus_restore_locked(struct pci_bus *bus)
5697  {
5698  	struct pci_dev *dev;
5699  
5700  	list_for_each_entry(dev, &bus->devices, bus_list) {
5701  		pci_dev_restore(dev);
5702  		if (dev->subordinate) {
5703  			pci_bridge_wait_for_secondary_bus(dev, "bus reset");
5704  			pci_bus_restore_locked(dev->subordinate);
5705  		}
5706  	}
5707  }
5708  
5709  /*
5710   * Save and disable devices from the top of the tree down while holding
5711   * the @dev mutex lock for the entire tree.
5712   */
pci_slot_save_and_disable_locked(struct pci_slot * slot)5713  static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5714  {
5715  	struct pci_dev *dev;
5716  
5717  	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5718  		if (!dev->slot || dev->slot != slot)
5719  			continue;
5720  		pci_dev_save_and_disable(dev);
5721  		if (dev->subordinate)
5722  			pci_bus_save_and_disable_locked(dev->subordinate);
5723  	}
5724  }
5725  
5726  /*
5727   * Restore devices from top of the tree down while holding @dev mutex lock
5728   * for the entire tree.  Parent bridges need to be restored before we can
5729   * get to subordinate devices.
5730   */
pci_slot_restore_locked(struct pci_slot * slot)5731  static void pci_slot_restore_locked(struct pci_slot *slot)
5732  {
5733  	struct pci_dev *dev;
5734  
5735  	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5736  		if (!dev->slot || dev->slot != slot)
5737  			continue;
5738  		pci_dev_restore(dev);
5739  		if (dev->subordinate) {
5740  			pci_bridge_wait_for_secondary_bus(dev, "slot reset");
5741  			pci_bus_restore_locked(dev->subordinate);
5742  		}
5743  	}
5744  }
5745  
pci_slot_reset(struct pci_slot * slot,bool probe)5746  static int pci_slot_reset(struct pci_slot *slot, bool probe)
5747  {
5748  	int rc;
5749  
5750  	if (!slot || !pci_slot_resettable(slot))
5751  		return -ENOTTY;
5752  
5753  	if (!probe)
5754  		pci_slot_lock(slot);
5755  
5756  	might_sleep();
5757  
5758  	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5759  
5760  	if (!probe)
5761  		pci_slot_unlock(slot);
5762  
5763  	return rc;
5764  }
5765  
5766  /**
5767   * pci_probe_reset_slot - probe whether a PCI slot can be reset
5768   * @slot: PCI slot to probe
5769   *
5770   * Return 0 if slot can be reset, negative if a slot reset is not supported.
5771   */
pci_probe_reset_slot(struct pci_slot * slot)5772  int pci_probe_reset_slot(struct pci_slot *slot)
5773  {
5774  	return pci_slot_reset(slot, PCI_RESET_PROBE);
5775  }
5776  EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5777  
5778  /**
5779   * __pci_reset_slot - Try to reset a PCI slot
5780   * @slot: PCI slot to reset
5781   *
5782   * A PCI bus may host multiple slots, each slot may support a reset mechanism
5783   * independent of other slots.  For instance, some slots may support slot power
5784   * control.  In the case of a 1:1 bus to slot architecture, this function may
5785   * wrap the bus reset to avoid spurious slot related events such as hotplug.
5786   * Generally a slot reset should be attempted before a bus reset.  All of the
5787   * function of the slot and any subordinate buses behind the slot are reset
5788   * through this function.  PCI config space of all devices in the slot and
5789   * behind the slot is saved before and restored after reset.
5790   *
5791   * Same as above except return -EAGAIN if the slot cannot be locked
5792   */
__pci_reset_slot(struct pci_slot * slot)5793  static int __pci_reset_slot(struct pci_slot *slot)
5794  {
5795  	int rc;
5796  
5797  	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5798  	if (rc)
5799  		return rc;
5800  
5801  	if (pci_slot_trylock(slot)) {
5802  		pci_slot_save_and_disable_locked(slot);
5803  		might_sleep();
5804  		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5805  		pci_slot_restore_locked(slot);
5806  		pci_slot_unlock(slot);
5807  	} else
5808  		rc = -EAGAIN;
5809  
5810  	return rc;
5811  }
5812  
pci_bus_reset(struct pci_bus * bus,bool probe)5813  static int pci_bus_reset(struct pci_bus *bus, bool probe)
5814  {
5815  	int ret;
5816  
5817  	if (!bus->self || !pci_bus_resettable(bus))
5818  		return -ENOTTY;
5819  
5820  	if (probe)
5821  		return 0;
5822  
5823  	pci_bus_lock(bus);
5824  
5825  	might_sleep();
5826  
5827  	ret = pci_bridge_secondary_bus_reset(bus->self);
5828  
5829  	pci_bus_unlock(bus);
5830  
5831  	return ret;
5832  }
5833  
5834  /**
5835   * pci_bus_error_reset - reset the bridge's subordinate bus
5836   * @bridge: The parent device that connects to the bus to reset
5837   *
5838   * This function will first try to reset the slots on this bus if the method is
5839   * available. If slot reset fails or is not available, this will fall back to a
5840   * secondary bus reset.
5841   */
pci_bus_error_reset(struct pci_dev * bridge)5842  int pci_bus_error_reset(struct pci_dev *bridge)
5843  {
5844  	struct pci_bus *bus = bridge->subordinate;
5845  	struct pci_slot *slot;
5846  
5847  	if (!bus)
5848  		return -ENOTTY;
5849  
5850  	mutex_lock(&pci_slot_mutex);
5851  	if (list_empty(&bus->slots))
5852  		goto bus_reset;
5853  
5854  	list_for_each_entry(slot, &bus->slots, list)
5855  		if (pci_probe_reset_slot(slot))
5856  			goto bus_reset;
5857  
5858  	list_for_each_entry(slot, &bus->slots, list)
5859  		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5860  			goto bus_reset;
5861  
5862  	mutex_unlock(&pci_slot_mutex);
5863  	return 0;
5864  bus_reset:
5865  	mutex_unlock(&pci_slot_mutex);
5866  	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5867  }
5868  
5869  /**
5870   * pci_probe_reset_bus - probe whether a PCI bus can be reset
5871   * @bus: PCI bus to probe
5872   *
5873   * Return 0 if bus can be reset, negative if a bus reset is not supported.
5874   */
pci_probe_reset_bus(struct pci_bus * bus)5875  int pci_probe_reset_bus(struct pci_bus *bus)
5876  {
5877  	return pci_bus_reset(bus, PCI_RESET_PROBE);
5878  }
5879  EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5880  
5881  /**
5882   * __pci_reset_bus - Try to reset a PCI bus
5883   * @bus: top level PCI bus to reset
5884   *
5885   * Same as above except return -EAGAIN if the bus cannot be locked
5886   */
__pci_reset_bus(struct pci_bus * bus)5887  static int __pci_reset_bus(struct pci_bus *bus)
5888  {
5889  	int rc;
5890  
5891  	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5892  	if (rc)
5893  		return rc;
5894  
5895  	if (pci_bus_trylock(bus)) {
5896  		pci_bus_save_and_disable_locked(bus);
5897  		might_sleep();
5898  		rc = pci_bridge_secondary_bus_reset(bus->self);
5899  		pci_bus_restore_locked(bus);
5900  		pci_bus_unlock(bus);
5901  	} else
5902  		rc = -EAGAIN;
5903  
5904  	return rc;
5905  }
5906  
5907  /**
5908   * pci_reset_bus - Try to reset a PCI bus
5909   * @pdev: top level PCI device to reset via slot/bus
5910   *
5911   * Same as above except return -EAGAIN if the bus cannot be locked
5912   */
pci_reset_bus(struct pci_dev * pdev)5913  int pci_reset_bus(struct pci_dev *pdev)
5914  {
5915  	return (!pci_probe_reset_slot(pdev->slot)) ?
5916  	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5917  }
5918  EXPORT_SYMBOL_GPL(pci_reset_bus);
5919  
5920  /**
5921   * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5922   * @dev: PCI device to query
5923   *
5924   * Returns mmrbc: maximum designed memory read count in bytes or
5925   * appropriate error value.
5926   */
pcix_get_max_mmrbc(struct pci_dev * dev)5927  int pcix_get_max_mmrbc(struct pci_dev *dev)
5928  {
5929  	int cap;
5930  	u32 stat;
5931  
5932  	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5933  	if (!cap)
5934  		return -EINVAL;
5935  
5936  	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5937  		return -EINVAL;
5938  
5939  	return 512 << FIELD_GET(PCI_X_STATUS_MAX_READ, stat);
5940  }
5941  EXPORT_SYMBOL(pcix_get_max_mmrbc);
5942  
5943  /**
5944   * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5945   * @dev: PCI device to query
5946   *
5947   * Returns mmrbc: maximum memory read count in bytes or appropriate error
5948   * value.
5949   */
pcix_get_mmrbc(struct pci_dev * dev)5950  int pcix_get_mmrbc(struct pci_dev *dev)
5951  {
5952  	int cap;
5953  	u16 cmd;
5954  
5955  	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5956  	if (!cap)
5957  		return -EINVAL;
5958  
5959  	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5960  		return -EINVAL;
5961  
5962  	return 512 << FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
5963  }
5964  EXPORT_SYMBOL(pcix_get_mmrbc);
5965  
5966  /**
5967   * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5968   * @dev: PCI device to query
5969   * @mmrbc: maximum memory read count in bytes
5970   *    valid values are 512, 1024, 2048, 4096
5971   *
5972   * If possible sets maximum memory read byte count, some bridges have errata
5973   * that prevent this.
5974   */
pcix_set_mmrbc(struct pci_dev * dev,int mmrbc)5975  int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5976  {
5977  	int cap;
5978  	u32 stat, v, o;
5979  	u16 cmd;
5980  
5981  	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5982  		return -EINVAL;
5983  
5984  	v = ffs(mmrbc) - 10;
5985  
5986  	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5987  	if (!cap)
5988  		return -EINVAL;
5989  
5990  	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5991  		return -EINVAL;
5992  
5993  	if (v > FIELD_GET(PCI_X_STATUS_MAX_READ, stat))
5994  		return -E2BIG;
5995  
5996  	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5997  		return -EINVAL;
5998  
5999  	o = FIELD_GET(PCI_X_CMD_MAX_READ, cmd);
6000  	if (o != v) {
6001  		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
6002  			return -EIO;
6003  
6004  		cmd &= ~PCI_X_CMD_MAX_READ;
6005  		cmd |= FIELD_PREP(PCI_X_CMD_MAX_READ, v);
6006  		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
6007  			return -EIO;
6008  	}
6009  	return 0;
6010  }
6011  EXPORT_SYMBOL(pcix_set_mmrbc);
6012  
6013  /**
6014   * pcie_get_readrq - get PCI Express read request size
6015   * @dev: PCI device to query
6016   *
6017   * Returns maximum memory read request in bytes or appropriate error value.
6018   */
pcie_get_readrq(struct pci_dev * dev)6019  int pcie_get_readrq(struct pci_dev *dev)
6020  {
6021  	u16 ctl;
6022  
6023  	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6024  
6025  	return 128 << FIELD_GET(PCI_EXP_DEVCTL_READRQ, ctl);
6026  }
6027  EXPORT_SYMBOL(pcie_get_readrq);
6028  
6029  /**
6030   * pcie_set_readrq - set PCI Express maximum memory read request
6031   * @dev: PCI device to query
6032   * @rq: maximum memory read count in bytes
6033   *    valid values are 128, 256, 512, 1024, 2048, 4096
6034   *
6035   * If possible sets maximum memory read request in bytes
6036   */
pcie_set_readrq(struct pci_dev * dev,int rq)6037  int pcie_set_readrq(struct pci_dev *dev, int rq)
6038  {
6039  	u16 v;
6040  	int ret;
6041  	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
6042  
6043  	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6044  		return -EINVAL;
6045  
6046  	/*
6047  	 * If using the "performance" PCIe config, we clamp the read rq
6048  	 * size to the max packet size to keep the host bridge from
6049  	 * generating requests larger than we can cope with.
6050  	 */
6051  	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6052  		int mps = pcie_get_mps(dev);
6053  
6054  		if (mps < rq)
6055  			rq = mps;
6056  	}
6057  
6058  	v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
6059  
6060  	if (bridge->no_inc_mrrs) {
6061  		int max_mrrs = pcie_get_readrq(dev);
6062  
6063  		if (rq > max_mrrs) {
6064  			pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
6065  			return -EINVAL;
6066  		}
6067  	}
6068  
6069  	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6070  						  PCI_EXP_DEVCTL_READRQ, v);
6071  
6072  	return pcibios_err_to_errno(ret);
6073  }
6074  EXPORT_SYMBOL(pcie_set_readrq);
6075  
6076  /**
6077   * pcie_get_mps - get PCI Express maximum payload size
6078   * @dev: PCI device to query
6079   *
6080   * Returns maximum payload size in bytes
6081   */
pcie_get_mps(struct pci_dev * dev)6082  int pcie_get_mps(struct pci_dev *dev)
6083  {
6084  	u16 ctl;
6085  
6086  	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6087  
6088  	return 128 << FIELD_GET(PCI_EXP_DEVCTL_PAYLOAD, ctl);
6089  }
6090  EXPORT_SYMBOL(pcie_get_mps);
6091  
6092  /**
6093   * pcie_set_mps - set PCI Express maximum payload size
6094   * @dev: PCI device to query
6095   * @mps: maximum payload size in bytes
6096   *    valid values are 128, 256, 512, 1024, 2048, 4096
6097   *
6098   * If possible sets maximum payload size
6099   */
pcie_set_mps(struct pci_dev * dev,int mps)6100  int pcie_set_mps(struct pci_dev *dev, int mps)
6101  {
6102  	u16 v;
6103  	int ret;
6104  
6105  	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6106  		return -EINVAL;
6107  
6108  	v = ffs(mps) - 8;
6109  	if (v > dev->pcie_mpss)
6110  		return -EINVAL;
6111  	v = FIELD_PREP(PCI_EXP_DEVCTL_PAYLOAD, v);
6112  
6113  	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6114  						  PCI_EXP_DEVCTL_PAYLOAD, v);
6115  
6116  	return pcibios_err_to_errno(ret);
6117  }
6118  EXPORT_SYMBOL(pcie_set_mps);
6119  
to_pcie_link_speed(u16 lnksta)6120  static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
6121  {
6122  	return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
6123  }
6124  
pcie_link_speed_mbps(struct pci_dev * pdev)6125  int pcie_link_speed_mbps(struct pci_dev *pdev)
6126  {
6127  	u16 lnksta;
6128  	int err;
6129  
6130  	err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
6131  	if (err)
6132  		return err;
6133  
6134  	return pcie_dev_speed_mbps(to_pcie_link_speed(lnksta));
6135  }
6136  EXPORT_SYMBOL(pcie_link_speed_mbps);
6137  
6138  /**
6139   * pcie_bandwidth_available - determine minimum link settings of a PCIe
6140   *			      device and its bandwidth limitation
6141   * @dev: PCI device to query
6142   * @limiting_dev: storage for device causing the bandwidth limitation
6143   * @speed: storage for speed of limiting device
6144   * @width: storage for width of limiting device
6145   *
6146   * Walk up the PCI device chain and find the point where the minimum
6147   * bandwidth is available.  Return the bandwidth available there and (if
6148   * limiting_dev, speed, and width pointers are supplied) information about
6149   * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
6150   * raw bandwidth.
6151   */
pcie_bandwidth_available(struct pci_dev * dev,struct pci_dev ** limiting_dev,enum pci_bus_speed * speed,enum pcie_link_width * width)6152  u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6153  			     enum pci_bus_speed *speed,
6154  			     enum pcie_link_width *width)
6155  {
6156  	u16 lnksta;
6157  	enum pci_bus_speed next_speed;
6158  	enum pcie_link_width next_width;
6159  	u32 bw, next_bw;
6160  
6161  	if (speed)
6162  		*speed = PCI_SPEED_UNKNOWN;
6163  	if (width)
6164  		*width = PCIE_LNK_WIDTH_UNKNOWN;
6165  
6166  	bw = 0;
6167  
6168  	while (dev) {
6169  		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6170  
6171  		next_speed = to_pcie_link_speed(lnksta);
6172  		next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
6173  
6174  		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6175  
6176  		/* Check if current device limits the total bandwidth */
6177  		if (!bw || next_bw <= bw) {
6178  			bw = next_bw;
6179  
6180  			if (limiting_dev)
6181  				*limiting_dev = dev;
6182  			if (speed)
6183  				*speed = next_speed;
6184  			if (width)
6185  				*width = next_width;
6186  		}
6187  
6188  		dev = pci_upstream_bridge(dev);
6189  	}
6190  
6191  	return bw;
6192  }
6193  EXPORT_SYMBOL(pcie_bandwidth_available);
6194  
6195  /**
6196   * pcie_get_speed_cap - query for the PCI device's link speed capability
6197   * @dev: PCI device to query
6198   *
6199   * Query the PCI device speed capability.  Return the maximum link speed
6200   * supported by the device.
6201   */
pcie_get_speed_cap(struct pci_dev * dev)6202  enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6203  {
6204  	u32 lnkcap2, lnkcap;
6205  
6206  	/*
6207  	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
6208  	 * implementation note there recommends using the Supported Link
6209  	 * Speeds Vector in Link Capabilities 2 when supported.
6210  	 *
6211  	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6212  	 * should use the Supported Link Speeds field in Link Capabilities,
6213  	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6214  	 */
6215  	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6216  
6217  	/* PCIe r3.0-compliant */
6218  	if (lnkcap2)
6219  		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6220  
6221  	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6222  	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6223  		return PCIE_SPEED_5_0GT;
6224  	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6225  		return PCIE_SPEED_2_5GT;
6226  
6227  	return PCI_SPEED_UNKNOWN;
6228  }
6229  EXPORT_SYMBOL(pcie_get_speed_cap);
6230  
6231  /**
6232   * pcie_get_width_cap - query for the PCI device's link width capability
6233   * @dev: PCI device to query
6234   *
6235   * Query the PCI device width capability.  Return the maximum link width
6236   * supported by the device.
6237   */
pcie_get_width_cap(struct pci_dev * dev)6238  enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6239  {
6240  	u32 lnkcap;
6241  
6242  	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6243  	if (lnkcap)
6244  		return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
6245  
6246  	return PCIE_LNK_WIDTH_UNKNOWN;
6247  }
6248  EXPORT_SYMBOL(pcie_get_width_cap);
6249  
6250  /**
6251   * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6252   * @dev: PCI device
6253   * @speed: storage for link speed
6254   * @width: storage for link width
6255   *
6256   * Calculate a PCI device's link bandwidth by querying for its link speed
6257   * and width, multiplying them, and applying encoding overhead.  The result
6258   * is in Mb/s, i.e., megabits/second of raw bandwidth.
6259   */
pcie_bandwidth_capable(struct pci_dev * dev,enum pci_bus_speed * speed,enum pcie_link_width * width)6260  static u32 pcie_bandwidth_capable(struct pci_dev *dev,
6261  				  enum pci_bus_speed *speed,
6262  				  enum pcie_link_width *width)
6263  {
6264  	*speed = pcie_get_speed_cap(dev);
6265  	*width = pcie_get_width_cap(dev);
6266  
6267  	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6268  		return 0;
6269  
6270  	return *width * PCIE_SPEED2MBS_ENC(*speed);
6271  }
6272  
6273  /**
6274   * __pcie_print_link_status - Report the PCI device's link speed and width
6275   * @dev: PCI device to query
6276   * @verbose: Print info even when enough bandwidth is available
6277   *
6278   * If the available bandwidth at the device is less than the device is
6279   * capable of, report the device's maximum possible bandwidth and the
6280   * upstream link that limits its performance.  If @verbose, always print
6281   * the available bandwidth, even if the device isn't constrained.
6282   */
__pcie_print_link_status(struct pci_dev * dev,bool verbose)6283  void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6284  {
6285  	enum pcie_link_width width, width_cap;
6286  	enum pci_bus_speed speed, speed_cap;
6287  	struct pci_dev *limiting_dev = NULL;
6288  	u32 bw_avail, bw_cap;
6289  
6290  	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6291  	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6292  
6293  	if (bw_avail >= bw_cap && verbose)
6294  		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6295  			 bw_cap / 1000, bw_cap % 1000,
6296  			 pci_speed_string(speed_cap), width_cap);
6297  	else if (bw_avail < bw_cap)
6298  		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6299  			 bw_avail / 1000, bw_avail % 1000,
6300  			 pci_speed_string(speed), width,
6301  			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6302  			 bw_cap / 1000, bw_cap % 1000,
6303  			 pci_speed_string(speed_cap), width_cap);
6304  }
6305  
6306  /**
6307   * pcie_print_link_status - Report the PCI device's link speed and width
6308   * @dev: PCI device to query
6309   *
6310   * Report the available bandwidth at the device.
6311   */
pcie_print_link_status(struct pci_dev * dev)6312  void pcie_print_link_status(struct pci_dev *dev)
6313  {
6314  	__pcie_print_link_status(dev, true);
6315  }
6316  EXPORT_SYMBOL(pcie_print_link_status);
6317  
6318  /**
6319   * pci_select_bars - Make BAR mask from the type of resource
6320   * @dev: the PCI device for which BAR mask is made
6321   * @flags: resource type mask to be selected
6322   *
6323   * This helper routine makes bar mask from the type of resource.
6324   */
pci_select_bars(struct pci_dev * dev,unsigned long flags)6325  int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6326  {
6327  	int i, bars = 0;
6328  	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6329  		if (pci_resource_flags(dev, i) & flags)
6330  			bars |= (1 << i);
6331  	return bars;
6332  }
6333  EXPORT_SYMBOL(pci_select_bars);
6334  
6335  /* Some architectures require additional programming to enable VGA */
6336  static arch_set_vga_state_t arch_set_vga_state;
6337  
pci_register_set_vga_state(arch_set_vga_state_t func)6338  void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6339  {
6340  	arch_set_vga_state = func;	/* NULL disables */
6341  }
6342  
pci_set_vga_state_arch(struct pci_dev * dev,bool decode,unsigned int command_bits,u32 flags)6343  static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6344  				  unsigned int command_bits, u32 flags)
6345  {
6346  	if (arch_set_vga_state)
6347  		return arch_set_vga_state(dev, decode, command_bits,
6348  						flags);
6349  	return 0;
6350  }
6351  
6352  /**
6353   * pci_set_vga_state - set VGA decode state on device and parents if requested
6354   * @dev: the PCI device
6355   * @decode: true = enable decoding, false = disable decoding
6356   * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6357   * @flags: traverse ancestors and change bridges
6358   * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
6359   */
pci_set_vga_state(struct pci_dev * dev,bool decode,unsigned int command_bits,u32 flags)6360  int pci_set_vga_state(struct pci_dev *dev, bool decode,
6361  		      unsigned int command_bits, u32 flags)
6362  {
6363  	struct pci_bus *bus;
6364  	struct pci_dev *bridge;
6365  	u16 cmd;
6366  	int rc;
6367  
6368  	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6369  
6370  	/* ARCH specific VGA enables */
6371  	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6372  	if (rc)
6373  		return rc;
6374  
6375  	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6376  		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6377  		if (decode)
6378  			cmd |= command_bits;
6379  		else
6380  			cmd &= ~command_bits;
6381  		pci_write_config_word(dev, PCI_COMMAND, cmd);
6382  	}
6383  
6384  	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6385  		return 0;
6386  
6387  	bus = dev->bus;
6388  	while (bus) {
6389  		bridge = bus->self;
6390  		if (bridge) {
6391  			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6392  					     &cmd);
6393  			if (decode)
6394  				cmd |= PCI_BRIDGE_CTL_VGA;
6395  			else
6396  				cmd &= ~PCI_BRIDGE_CTL_VGA;
6397  			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6398  					      cmd);
6399  		}
6400  		bus = bus->parent;
6401  	}
6402  	return 0;
6403  }
6404  
6405  #ifdef CONFIG_ACPI
pci_pr3_present(struct pci_dev * pdev)6406  bool pci_pr3_present(struct pci_dev *pdev)
6407  {
6408  	struct acpi_device *adev;
6409  
6410  	if (acpi_disabled)
6411  		return false;
6412  
6413  	adev = ACPI_COMPANION(&pdev->dev);
6414  	if (!adev)
6415  		return false;
6416  
6417  	return adev->power.flags.power_resources &&
6418  		acpi_has_method(adev->handle, "_PR3");
6419  }
6420  EXPORT_SYMBOL_GPL(pci_pr3_present);
6421  #endif
6422  
6423  /**
6424   * pci_add_dma_alias - Add a DMA devfn alias for a device
6425   * @dev: the PCI device for which alias is added
6426   * @devfn_from: alias slot and function
6427   * @nr_devfns: number of subsequent devfns to alias
6428   *
6429   * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6430   * which is used to program permissible bus-devfn source addresses for DMA
6431   * requests in an IOMMU.  These aliases factor into IOMMU group creation
6432   * and are useful for devices generating DMA requests beyond or different
6433   * from their logical bus-devfn.  Examples include device quirks where the
6434   * device simply uses the wrong devfn, as well as non-transparent bridges
6435   * where the alias may be a proxy for devices in another domain.
6436   *
6437   * IOMMU group creation is performed during device discovery or addition,
6438   * prior to any potential DMA mapping and therefore prior to driver probing
6439   * (especially for userspace assigned devices where IOMMU group definition
6440   * cannot be left as a userspace activity).  DMA aliases should therefore
6441   * be configured via quirks, such as the PCI fixup header quirk.
6442   */
pci_add_dma_alias(struct pci_dev * dev,u8 devfn_from,unsigned int nr_devfns)6443  void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6444  		       unsigned int nr_devfns)
6445  {
6446  	int devfn_to;
6447  
6448  	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6449  	devfn_to = devfn_from + nr_devfns - 1;
6450  
6451  	if (!dev->dma_alias_mask)
6452  		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6453  	if (!dev->dma_alias_mask) {
6454  		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6455  		return;
6456  	}
6457  
6458  	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6459  
6460  	if (nr_devfns == 1)
6461  		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6462  				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6463  	else if (nr_devfns > 1)
6464  		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6465  				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6466  				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6467  }
6468  
pci_devs_are_dma_aliases(struct pci_dev * dev1,struct pci_dev * dev2)6469  bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6470  {
6471  	return (dev1->dma_alias_mask &&
6472  		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6473  	       (dev2->dma_alias_mask &&
6474  		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6475  	       pci_real_dma_dev(dev1) == dev2 ||
6476  	       pci_real_dma_dev(dev2) == dev1;
6477  }
6478  
pci_device_is_present(struct pci_dev * pdev)6479  bool pci_device_is_present(struct pci_dev *pdev)
6480  {
6481  	u32 v;
6482  
6483  	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6484  	pdev = pci_physfn(pdev);
6485  	if (pci_dev_is_disconnected(pdev))
6486  		return false;
6487  	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6488  }
6489  EXPORT_SYMBOL_GPL(pci_device_is_present);
6490  
pci_ignore_hotplug(struct pci_dev * dev)6491  void pci_ignore_hotplug(struct pci_dev *dev)
6492  {
6493  	struct pci_dev *bridge = dev->bus->self;
6494  
6495  	dev->ignore_hotplug = 1;
6496  	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6497  	if (bridge)
6498  		bridge->ignore_hotplug = 1;
6499  }
6500  EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6501  
6502  /**
6503   * pci_real_dma_dev - Get PCI DMA device for PCI device
6504   * @dev: the PCI device that may have a PCI DMA alias
6505   *
6506   * Permits the platform to provide architecture-specific functionality to
6507   * devices needing to alias DMA to another PCI device on another PCI bus. If
6508   * the PCI device is on the same bus, it is recommended to use
6509   * pci_add_dma_alias(). This is the default implementation. Architecture
6510   * implementations can override this.
6511   */
pci_real_dma_dev(struct pci_dev * dev)6512  struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6513  {
6514  	return dev;
6515  }
6516  
pcibios_default_alignment(void)6517  resource_size_t __weak pcibios_default_alignment(void)
6518  {
6519  	return 0;
6520  }
6521  
6522  /*
6523   * Arches that don't want to expose struct resource to userland as-is in
6524   * sysfs and /proc can implement their own pci_resource_to_user().
6525   */
pci_resource_to_user(const struct pci_dev * dev,int bar,const struct resource * rsrc,resource_size_t * start,resource_size_t * end)6526  void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6527  				 const struct resource *rsrc,
6528  				 resource_size_t *start, resource_size_t *end)
6529  {
6530  	*start = rsrc->start;
6531  	*end = rsrc->end;
6532  }
6533  
6534  static char *resource_alignment_param;
6535  static DEFINE_SPINLOCK(resource_alignment_lock);
6536  
6537  /**
6538   * pci_specified_resource_alignment - get resource alignment specified by user.
6539   * @dev: the PCI device to get
6540   * @resize: whether or not to change resources' size when reassigning alignment
6541   *
6542   * RETURNS: Resource alignment if it is specified.
6543   *          Zero if it is not specified.
6544   */
pci_specified_resource_alignment(struct pci_dev * dev,bool * resize)6545  static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6546  							bool *resize)
6547  {
6548  	int align_order, count;
6549  	resource_size_t align = pcibios_default_alignment();
6550  	const char *p;
6551  	int ret;
6552  
6553  	spin_lock(&resource_alignment_lock);
6554  	p = resource_alignment_param;
6555  	if (!p || !*p)
6556  		goto out;
6557  	if (pci_has_flag(PCI_PROBE_ONLY)) {
6558  		align = 0;
6559  		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6560  		goto out;
6561  	}
6562  
6563  	while (*p) {
6564  		count = 0;
6565  		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6566  		    p[count] == '@') {
6567  			p += count + 1;
6568  			if (align_order > 63) {
6569  				pr_err("PCI: Invalid requested alignment (order %d)\n",
6570  				       align_order);
6571  				align_order = PAGE_SHIFT;
6572  			}
6573  		} else {
6574  			align_order = PAGE_SHIFT;
6575  		}
6576  
6577  		ret = pci_dev_str_match(dev, p, &p);
6578  		if (ret == 1) {
6579  			*resize = true;
6580  			align = 1ULL << align_order;
6581  			break;
6582  		} else if (ret < 0) {
6583  			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6584  			       p);
6585  			break;
6586  		}
6587  
6588  		if (*p != ';' && *p != ',') {
6589  			/* End of param or invalid format */
6590  			break;
6591  		}
6592  		p++;
6593  	}
6594  out:
6595  	spin_unlock(&resource_alignment_lock);
6596  	return align;
6597  }
6598  
pci_request_resource_alignment(struct pci_dev * dev,int bar,resource_size_t align,bool resize)6599  static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6600  					   resource_size_t align, bool resize)
6601  {
6602  	struct resource *r = &dev->resource[bar];
6603  	const char *r_name = pci_resource_name(dev, bar);
6604  	resource_size_t size;
6605  
6606  	if (!(r->flags & IORESOURCE_MEM))
6607  		return;
6608  
6609  	if (r->flags & IORESOURCE_PCI_FIXED) {
6610  		pci_info(dev, "%s %pR: ignoring requested alignment %#llx\n",
6611  			 r_name, r, (unsigned long long)align);
6612  		return;
6613  	}
6614  
6615  	size = resource_size(r);
6616  	if (size >= align)
6617  		return;
6618  
6619  	/*
6620  	 * Increase the alignment of the resource.  There are two ways we
6621  	 * can do this:
6622  	 *
6623  	 * 1) Increase the size of the resource.  BARs are aligned on their
6624  	 *    size, so when we reallocate space for this resource, we'll
6625  	 *    allocate it with the larger alignment.  This also prevents
6626  	 *    assignment of any other BARs inside the alignment region, so
6627  	 *    if we're requesting page alignment, this means no other BARs
6628  	 *    will share the page.
6629  	 *
6630  	 *    The disadvantage is that this makes the resource larger than
6631  	 *    the hardware BAR, which may break drivers that compute things
6632  	 *    based on the resource size, e.g., to find registers at a
6633  	 *    fixed offset before the end of the BAR.
6634  	 *
6635  	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6636  	 *    set r->start to the desired alignment.  By itself this
6637  	 *    doesn't prevent other BARs being put inside the alignment
6638  	 *    region, but if we realign *every* resource of every device in
6639  	 *    the system, none of them will share an alignment region.
6640  	 *
6641  	 * When the user has requested alignment for only some devices via
6642  	 * the "pci=resource_alignment" argument, "resize" is true and we
6643  	 * use the first method.  Otherwise we assume we're aligning all
6644  	 * devices and we use the second.
6645  	 */
6646  
6647  	pci_info(dev, "%s %pR: requesting alignment to %#llx\n",
6648  		 r_name, r, (unsigned long long)align);
6649  
6650  	if (resize) {
6651  		r->start = 0;
6652  		r->end = align - 1;
6653  	} else {
6654  		r->flags &= ~IORESOURCE_SIZEALIGN;
6655  		r->flags |= IORESOURCE_STARTALIGN;
6656  		r->start = align;
6657  		r->end = r->start + size - 1;
6658  	}
6659  	r->flags |= IORESOURCE_UNSET;
6660  }
6661  
6662  /*
6663   * This function disables memory decoding and releases memory resources
6664   * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
6665   * It also rounds up size to specified alignment.
6666   * Later on, the kernel will assign page-aligned memory resource back
6667   * to the device.
6668   */
pci_reassigndev_resource_alignment(struct pci_dev * dev)6669  void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6670  {
6671  	int i;
6672  	struct resource *r;
6673  	resource_size_t align;
6674  	u16 command;
6675  	bool resize = false;
6676  
6677  	/*
6678  	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6679  	 * 3.4.1.11.  Their resources are allocated from the space
6680  	 * described by the VF BARx register in the PF's SR-IOV capability.
6681  	 * We can't influence their alignment here.
6682  	 */
6683  	if (dev->is_virtfn)
6684  		return;
6685  
6686  	/* check if specified PCI is target device to reassign */
6687  	align = pci_specified_resource_alignment(dev, &resize);
6688  	if (!align)
6689  		return;
6690  
6691  	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6692  	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6693  		pci_warn(dev, "Can't reassign resources to host bridge\n");
6694  		return;
6695  	}
6696  
6697  	pci_read_config_word(dev, PCI_COMMAND, &command);
6698  	command &= ~PCI_COMMAND_MEMORY;
6699  	pci_write_config_word(dev, PCI_COMMAND, command);
6700  
6701  	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6702  		pci_request_resource_alignment(dev, i, align, resize);
6703  
6704  	/*
6705  	 * Need to disable bridge's resource window,
6706  	 * to enable the kernel to reassign new resource
6707  	 * window later on.
6708  	 */
6709  	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6710  		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6711  			r = &dev->resource[i];
6712  			if (!(r->flags & IORESOURCE_MEM))
6713  				continue;
6714  			r->flags |= IORESOURCE_UNSET;
6715  			r->end = resource_size(r) - 1;
6716  			r->start = 0;
6717  		}
6718  		pci_disable_bridge_window(dev);
6719  	}
6720  }
6721  
resource_alignment_show(const struct bus_type * bus,char * buf)6722  static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf)
6723  {
6724  	size_t count = 0;
6725  
6726  	spin_lock(&resource_alignment_lock);
6727  	if (resource_alignment_param)
6728  		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6729  	spin_unlock(&resource_alignment_lock);
6730  
6731  	return count;
6732  }
6733  
resource_alignment_store(const struct bus_type * bus,const char * buf,size_t count)6734  static ssize_t resource_alignment_store(const struct bus_type *bus,
6735  					const char *buf, size_t count)
6736  {
6737  	char *param, *old, *end;
6738  
6739  	if (count >= (PAGE_SIZE - 1))
6740  		return -EINVAL;
6741  
6742  	param = kstrndup(buf, count, GFP_KERNEL);
6743  	if (!param)
6744  		return -ENOMEM;
6745  
6746  	end = strchr(param, '\n');
6747  	if (end)
6748  		*end = '\0';
6749  
6750  	spin_lock(&resource_alignment_lock);
6751  	old = resource_alignment_param;
6752  	if (strlen(param)) {
6753  		resource_alignment_param = param;
6754  	} else {
6755  		kfree(param);
6756  		resource_alignment_param = NULL;
6757  	}
6758  	spin_unlock(&resource_alignment_lock);
6759  
6760  	kfree(old);
6761  
6762  	return count;
6763  }
6764  
6765  static BUS_ATTR_RW(resource_alignment);
6766  
pci_resource_alignment_sysfs_init(void)6767  static int __init pci_resource_alignment_sysfs_init(void)
6768  {
6769  	return bus_create_file(&pci_bus_type,
6770  					&bus_attr_resource_alignment);
6771  }
6772  late_initcall(pci_resource_alignment_sysfs_init);
6773  
pci_no_domains(void)6774  static void pci_no_domains(void)
6775  {
6776  #ifdef CONFIG_PCI_DOMAINS
6777  	pci_domains_supported = 0;
6778  #endif
6779  }
6780  
6781  #ifdef CONFIG_PCI_DOMAINS_GENERIC
6782  static DEFINE_IDA(pci_domain_nr_static_ida);
6783  static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6784  
of_pci_reserve_static_domain_nr(void)6785  static void of_pci_reserve_static_domain_nr(void)
6786  {
6787  	struct device_node *np;
6788  	int domain_nr;
6789  
6790  	for_each_node_by_type(np, "pci") {
6791  		domain_nr = of_get_pci_domain_nr(np);
6792  		if (domain_nr < 0)
6793  			continue;
6794  		/*
6795  		 * Permanently allocate domain_nr in dynamic_ida
6796  		 * to prevent it from dynamic allocation.
6797  		 */
6798  		ida_alloc_range(&pci_domain_nr_dynamic_ida,
6799  				domain_nr, domain_nr, GFP_KERNEL);
6800  	}
6801  }
6802  
of_pci_bus_find_domain_nr(struct device * parent)6803  static int of_pci_bus_find_domain_nr(struct device *parent)
6804  {
6805  	static bool static_domains_reserved = false;
6806  	int domain_nr;
6807  
6808  	/* On the first call scan device tree for static allocations. */
6809  	if (!static_domains_reserved) {
6810  		of_pci_reserve_static_domain_nr();
6811  		static_domains_reserved = true;
6812  	}
6813  
6814  	if (parent) {
6815  		/*
6816  		 * If domain is in DT, allocate it in static IDA.  This
6817  		 * prevents duplicate static allocations in case of errors
6818  		 * in DT.
6819  		 */
6820  		domain_nr = of_get_pci_domain_nr(parent->of_node);
6821  		if (domain_nr >= 0)
6822  			return ida_alloc_range(&pci_domain_nr_static_ida,
6823  					       domain_nr, domain_nr,
6824  					       GFP_KERNEL);
6825  	}
6826  
6827  	/*
6828  	 * If domain was not specified in DT, choose a free ID from dynamic
6829  	 * allocations. All domain numbers from DT are permanently in
6830  	 * dynamic allocations to prevent assigning them to other DT nodes
6831  	 * without static domain.
6832  	 */
6833  	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6834  }
6835  
of_pci_bus_release_domain_nr(struct device * parent,int domain_nr)6836  static void of_pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6837  {
6838  	if (domain_nr < 0)
6839  		return;
6840  
6841  	/* Release domain from IDA where it was allocated. */
6842  	if (of_get_pci_domain_nr(parent->of_node) == domain_nr)
6843  		ida_free(&pci_domain_nr_static_ida, domain_nr);
6844  	else
6845  		ida_free(&pci_domain_nr_dynamic_ida, domain_nr);
6846  }
6847  
pci_bus_find_domain_nr(struct pci_bus * bus,struct device * parent)6848  int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6849  {
6850  	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6851  			       acpi_pci_bus_find_domain_nr(bus);
6852  }
6853  
pci_bus_release_domain_nr(struct device * parent,int domain_nr)6854  void pci_bus_release_domain_nr(struct device *parent, int domain_nr)
6855  {
6856  	if (!acpi_disabled)
6857  		return;
6858  	of_pci_bus_release_domain_nr(parent, domain_nr);
6859  }
6860  #endif
6861  
6862  /**
6863   * pci_ext_cfg_avail - can we access extended PCI config space?
6864   *
6865   * Returns 1 if we can access PCI extended config space (offsets
6866   * greater than 0xff). This is the default implementation. Architecture
6867   * implementations can override this.
6868   */
pci_ext_cfg_avail(void)6869  int __weak pci_ext_cfg_avail(void)
6870  {
6871  	return 1;
6872  }
6873  
pci_fixup_cardbus(struct pci_bus * bus)6874  void __weak pci_fixup_cardbus(struct pci_bus *bus)
6875  {
6876  }
6877  EXPORT_SYMBOL(pci_fixup_cardbus);
6878  
pci_setup(char * str)6879  static int __init pci_setup(char *str)
6880  {
6881  	while (str) {
6882  		char *k = strchr(str, ',');
6883  		if (k)
6884  			*k++ = 0;
6885  		if (*str && (str = pcibios_setup(str)) && *str) {
6886  			if (!strcmp(str, "nomsi")) {
6887  				pci_no_msi();
6888  			} else if (!strncmp(str, "noats", 5)) {
6889  				pr_info("PCIe: ATS is disabled\n");
6890  				pcie_ats_disabled = true;
6891  			} else if (!strcmp(str, "noaer")) {
6892  				pci_no_aer();
6893  			} else if (!strcmp(str, "earlydump")) {
6894  				pci_early_dump = true;
6895  			} else if (!strncmp(str, "realloc=", 8)) {
6896  				pci_realloc_get_opt(str + 8);
6897  			} else if (!strncmp(str, "realloc", 7)) {
6898  				pci_realloc_get_opt("on");
6899  			} else if (!strcmp(str, "nodomains")) {
6900  				pci_no_domains();
6901  			} else if (!strncmp(str, "noari", 5)) {
6902  				pcie_ari_disabled = true;
6903  			} else if (!strncmp(str, "cbiosize=", 9)) {
6904  				pci_cardbus_io_size = memparse(str + 9, &str);
6905  			} else if (!strncmp(str, "cbmemsize=", 10)) {
6906  				pci_cardbus_mem_size = memparse(str + 10, &str);
6907  			} else if (!strncmp(str, "resource_alignment=", 19)) {
6908  				resource_alignment_param = str + 19;
6909  			} else if (!strncmp(str, "ecrc=", 5)) {
6910  				pcie_ecrc_get_policy(str + 5);
6911  			} else if (!strncmp(str, "hpiosize=", 9)) {
6912  				pci_hotplug_io_size = memparse(str + 9, &str);
6913  			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6914  				pci_hotplug_mmio_size = memparse(str + 11, &str);
6915  			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6916  				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6917  			} else if (!strncmp(str, "hpmemsize=", 10)) {
6918  				pci_hotplug_mmio_size = memparse(str + 10, &str);
6919  				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6920  			} else if (!strncmp(str, "hpbussize=", 10)) {
6921  				pci_hotplug_bus_size =
6922  					simple_strtoul(str + 10, &str, 0);
6923  				if (pci_hotplug_bus_size > 0xff)
6924  					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6925  			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6926  				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6927  			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6928  				pcie_bus_config = PCIE_BUS_SAFE;
6929  			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6930  				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6931  			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6932  				pcie_bus_config = PCIE_BUS_PEER2PEER;
6933  			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6934  				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6935  			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6936  				disable_acs_redir_param = str + 18;
6937  			} else if (!strncmp(str, "config_acs=", 11)) {
6938  				config_acs_param = str + 11;
6939  			} else {
6940  				pr_err("PCI: Unknown option `%s'\n", str);
6941  			}
6942  		}
6943  		str = k;
6944  	}
6945  	return 0;
6946  }
6947  early_param("pci", pci_setup);
6948  
6949  /*
6950   * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6951   * in pci_setup(), above, to point to data in the __initdata section which
6952   * will be freed after the init sequence is complete. We can't allocate memory
6953   * in pci_setup() because some architectures do not have any memory allocation
6954   * service available during an early_param() call. So we allocate memory and
6955   * copy the variable here before the init section is freed.
6956   *
6957   */
pci_realloc_setup_params(void)6958  static int __init pci_realloc_setup_params(void)
6959  {
6960  	resource_alignment_param = kstrdup(resource_alignment_param,
6961  					   GFP_KERNEL);
6962  	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6963  	config_acs_param = kstrdup(config_acs_param, GFP_KERNEL);
6964  
6965  	return 0;
6966  }
6967  pure_initcall(pci_realloc_setup_params);
6968