// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/cleanup.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include "pci.h"

#ifdef CONFIG_PCI
/**
 * pci_set_of_node - Find and set device's DT device_node
 * @dev: the PCI device structure to fill
 *
 * Returns 0 on success with of_node set or when no device is described in the
 * DT. Returns -ENODEV if the device is present, but disabled in the DT.
 */
int pci_set_of_node(struct pci_dev *dev)
{
	if (!dev->bus->dev.of_node)
		return 0;

	struct device_node *node __free(device_node) =
		of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
	if (!node)
		return 0;

	struct device *pdev __free(put_device) =
		bus_find_device_by_of_node(&platform_bus_type, node);
	if (pdev)
		dev->bus->dev.of_node_reused = true;

	device_set_node(&dev->dev, of_fwnode_handle(no_free_ptr(node)));
	return 0;
}

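/**
 * pci_release_of_node - Drop the DT node reference taken by pci_set_of_node()
 * @dev: the PCI device structure to clear
 *
 * Puts the device_node reference held in dev->dev.of_node and clears the
 * device's of_node/fwnode pointers.
 */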
void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	device_set_node(&dev->dev, NULL);
}

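/**
 * pci_set_bus_of_node - Associate a DT node with a PCI bus
 * @bus: the PCI bus to fill in
 *
 * For a root bus the node is obtained from the architecture via
 * pcibios_get_phb_of_node(); otherwise the bridge device's node is reused.
 * Also records whether the bridge is marked "external-facing" in the DT.
 */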
void pci_set_bus_of_node(struct pci_bus *bus)
{
	struct device_node *node;

	if (bus->self == NULL) {
		node = pcibios_get_phb_of_node(bus);
	} else {
		node = of_node_get(bus->self->dev.of_node);
		if (node && of_property_read_bool(node, "external-facing"))
			bus->self->external_facing = true;
	}

	device_set_node(&bus->dev, of_fwnode_handle(node));
}

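/**
 * pci_release_bus_of_node - Drop the DT node reference held by a PCI bus
 * @bus: the PCI bus to clear
 */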
void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	device_set_node(&bus->dev, NULL);
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

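/**
 * pci_host_bridge_of_msi_domain - Find the MSI irq_domain for a host bridge
 * @bus: root bus of the host bridge
 *
 * The lookup order is: an "msi-parent" phandle resolved via
 * of_msi_get_domain(), then a DOMAIN_BUS_PCI_MSI domain attached directly
 * to the host bridge node, and finally any irq_domain registered for that
 * node. Returns NULL if CONFIG_IRQ_DOMAIN is disabled or nothing matches.
 */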
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

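/**
 * pci_host_of_has_msi_map - Check whether a host bridge node has an "msi-map"
 * @dev: host bridge device
 *
 * Returns true if the device's DT node carries an "msi-map" property.
 */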
bool pci_host_of_has_msi_map(struct device *dev)
{
	if (dev && dev->of_node)
		return of_get_property(dev->of_node, "msi-map", NULL);
	return false;
}

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

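/**
 * of_pci_find_child_device - Find the DT child node for a given devfn
 * @parent: host bridge or bridge device node to search under
 * @devfn: encoded device/function number, as used by PCI_SLOT()/PCI_FUNC()
 *
 * Walks the children of @parent (descending into "multifunc-device"
 * container nodes) and returns the node whose "reg" property matches
 * @devfn, with its refcount incremented, or NULL if none is found.
 */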
struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; we go down into them as well.
		 */
		if (of_node_name_eq(node, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
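
/*
 * Example: in the standard OF PCI binding, bits [23:16] of the first "reg"
 * cell hold the bus number, bits [15:11] the device and bits [10:8] the
 * function, so a child node with reg = <0x0000e200 0 0 0 0> (device 0x1c,
 * function 2) yields devfn 0xe2, i.e. PCI_SLOT() == 0x1c and PCI_FUNC() == 2.
 */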

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);

/**
 * of_get_pci_domain_nr - Find the host bridge domain number
 *			  of the given device node.
 * @node: Device tree node with the domain information.
 *
 * This function will try to obtain the host bridge domain number by finding
 * a property called "linux,pci-domain" of the given device node.
 *
 * Return:
 * * >= 0	- On success, the associated domain number.
 * * -EINVAL	- The property "linux,pci-domain" does not exist.
 * * -ENODATA	- The "linux,pci-domain" property does not have a value.
 * * -EOVERFLOW	- Invalid "linux,pci-domain" property value.
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
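
/*
 * Example: a host bridge node carrying "linux,pci-domain = <2>;" makes this
 * helper return 2; without the property it returns -EINVAL and the PCI core
 * typically falls back to dynamically assigned domain numbers.
 */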

/**
 * of_pci_preserve_config - Return true if the boot configuration needs to
 *                          be preserved
 * @node: Device tree node.
 *
 * Look for "linux,pci-probe-only" property for a given PCI controller's
 * node and return true if found. Also look in the chosen node if the
 * property is not found in the given controller's node.  Having this
 * property ensures that the kernel doesn't reconfigure the BARs and bridge
 * windows that are already done by the platform firmware.
 *
 * Return: true if the property exists; false otherwise.
 */
bool of_pci_preserve_config(struct device_node *node)
{
	u32 val = 0;
	int ret;

	if (!node) {
		pr_warn("device node is NULL, trying with of_chosen\n");
		node = of_chosen;
	}

retry:
	ret = of_property_read_u32(node, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW) {
			pr_warn("Incorrect value for linux,pci-probe-only in %pOF, ignoring\n",
				node);
			return false;
		}
		if (ret == -EINVAL) {
			if (node == of_chosen)
				return false;

			node = of_chosen;
			goto retry;
		}
	}

	if (val)
		return true;
	else
		return false;
}

/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *                           is present and valid
 */
void of_pci_check_probe_only(void)
{
	if (of_pci_preserve_config(of_chosen))
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);

/**
 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
 *                                           host bridge resources from DT
 * @dev: host bridge device
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @ib_resources: list where the range of inbound resources (with addresses
 *                from 'dma-ranges') will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 * address for the start of the I/O range. Can be NULL if the caller doesn't
 * expect I/O ranges to be present in the device tree.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources,
			struct list_head *ib_resources,
			resource_size_t *io_base)
{
	struct device_node *dev_node = dev->of_node;
	struct resource *res, tmp_res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	const char *range_type;
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);

	err = of_pci_parse_bus_range(dev_node, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		dev_info(dev, "  No bus range found for %pOF, using %pR\n",
			 dev_node, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			range_type = "IO";
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			range_type = "MEM";
		else
			range_type = "err";
		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 range_type, range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev_node);
				err = -EINVAL;
				goto failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					 dev_node);
			*io_base = range.cpu_addr;
		} else if (resource_type(res) == IORESOURCE_MEM) {
			res->flags &= ~IORESOURCE_MEM_64;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	/* Check for dma-ranges property */
	if (!ib_resources)
		return 0;
	err = of_pci_dma_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing dma-ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
		    range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 "IB MEM", range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		pci_add_resource_offset(ib_resources, res,
					res->start - range.pci_addr);
	}

	return 0;

failed:
	pci_free_resource_list(resources);
	return err;
}

#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev:       the device whose interrupt is to be resolved
 * @out_irq:    structure of_phandle_args filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode = NULL;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node; if yes, fall back to standard
	 * device tree parsing.
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * OK, we don't; time to have fun. Let's start by building up an
	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
	 * for PCI. If you do it differently, don't use this routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Local interrupt-map in the device node? Use it! */
	if (of_property_present(dn, "interrupt-map")) {
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		ppnode = dn;
	}

	/* Now we walk up the PCI tree */
	while (!ppnode) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for host bridge? Give up */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * OK, we have found a parent with a device node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the Linux device to be used for
		 * resolution. Note that we use the Linux bus number, which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (i.e. you ship firmware that doesn't
		 * create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included so that this function can be used
 * directly as the map_irq callback for pci_assign_irq() and as the
 * struct pci_host_bridge.map_irq pointer.
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
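
/*
 * A minimal host driver wiring, sketched (devm_of_pci_bridge_init() below
 * sets this up automatically for DT-based bridges):
 *
 *	bridge->swizzle_irq = pci_common_swizzle;
 *	bridge->map_irq = of_irq_parse_and_map_pci;
 */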
#endif	/* CONFIG_OF_IRQ */

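/*
 * Parse the host bridge's "ranges" and "dma-ranges" via
 * devm_of_pci_get_host_bridge_resources(), claim the resulting bus
 * resources, map any I/O window into the CPU's I/O space and warn if no
 * non-prefetchable memory window was provided.
 */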
static int pci_parse_request_of_pci_ranges(struct device *dev,
					   struct pci_host_bridge *bridge)
{
	int err, res_valid = 0;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(&bridge->windows);
	INIT_LIST_HEAD(&bridge->dma_ranges);

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
						    &bridge->dma_ranges, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (err)
		return err;

	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);

			if (!(res->flags & IORESOURCE_PREFETCH))
				if (upper_32_bits(resource_size(res)))
					dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");

			break;
		}
	}

	if (!res_valid)
		dev_warn(dev, "non-prefetchable memory resource required\n");

	return 0;
}

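/**
 * devm_of_pci_bridge_init - Fill in DT-specific host bridge callbacks
 * @dev: host bridge device
 * @bridge: host bridge to initialize
 *
 * No-op for non-DT devices. Otherwise hooks up the standard INTx swizzle
 * and of_irq_parse_and_map_pci(), then parses and requests the bridge
 * windows from the device tree.
 */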
int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
{
	if (!dev->of_node)
		return 0;

	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = of_irq_parse_and_map_pci;

	return pci_parse_request_of_pci_ranges(dev, bridge);
}

#ifdef CONFIG_PCI_DYNAMIC_OF_NODES

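/*
 * of_pci_remove_node - Undo of_pci_make_dev_node()
 * @pdev: PCI device whose dynamically created DT node should be removed
 *
 * Only acts on OF_DYNAMIC nodes created by the changeset below; reverts and
 * destroys that changeset and drops the node reference.
 */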
void of_pci_remove_node(struct pci_dev *pdev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(pdev);
	if (!np || !of_node_check_flag(np, OF_DYNAMIC))
		return;
	pdev->dev.of_node = NULL;

	of_changeset_revert(np->data);
	of_changeset_destroy(np->data);
	of_node_put(np);
}

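/*
 * of_pci_make_dev_node - Create a DT node for a PCI device that lacks one
 * @pdev: PCI device to describe
 *
 * Builds a "<pci|dev>@<slot>,<function>" node under the parent bridge's node
 * using an OF changeset, populates it via of_pci_add_properties() and links
 * it to the device. On failure the device is simply left without a DT node.
 */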
void of_pci_make_dev_node(struct pci_dev *pdev)
{
	struct device_node *ppnode, *np = NULL;
	const char *pci_type;
	struct of_changeset *cset;
	const char *name;
	int ret;

	/*
	 * If there is already a device tree node linked to this device,
	 * return immediately.
	 */
	if (pci_device_to_OF_node(pdev))
		return;

	/* Check if there is a device tree node for the parent device */
	if (!pdev->bus->self)
		ppnode = pdev->bus->dev.of_node;
	else
		ppnode = pdev->bus->self->dev.of_node;
	if (!ppnode)
		return;

	if (pci_is_bridge(pdev))
		pci_type = "pci";
	else
		pci_type = "dev";

	name = kasprintf(GFP_KERNEL, "%s@%x,%x", pci_type,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	if (!name)
		return;

	cset = kmalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		goto out_free_name;
	of_changeset_init(cset);

	np = of_changeset_create_node(cset, ppnode, name);
	if (!np)
		goto out_destroy_cset;

	ret = of_pci_add_properties(pdev, cset, np);
	if (ret)
		goto out_free_node;

	ret = of_changeset_apply(cset);
	if (ret)
		goto out_free_node;

	np->data = cset;
	pdev->dev.of_node = np;
	kfree(name);

	return;

out_free_node:
	of_node_put(np);
out_destroy_cset:
	of_changeset_destroy(cset);
	kfree(cset);
out_free_name:
	kfree(name);
}
#endif

#endif /* CONFIG_PCI */

/**
 * of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
 * @node: Device tree node with the maximum link speed information.
 *
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * Return:
 * * > 0	- On success, a maximum link speed.
 * * -EINVAL	- Invalid "max-link-speed" property value, or failure to access
 *		  the property of the device tree node.
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed == 0 || max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
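
/*
 * Example: a controller node with "max-link-speed = <2>;" is used by host
 * controller drivers to cap the link at PCIe gen2 (5.0 GT/s) even if the
 * hardware could train to a higher rate.
 */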

/**
 * of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt"
 *				 property.
 *
 * @node: device tree node with the slot power limit information
 * @slot_power_limit_value: pointer where the value should be stored in PCIe
 *			    Slot Capabilities Register format
 * @slot_power_limit_scale: pointer where the scale should be stored in PCIe
 *			    Slot Capabilities Register format
 *
 * Returns the slot power limit in milliwatts and if @slot_power_limit_value
 * and @slot_power_limit_scale pointers are non-NULL, fills in the value and
 * scale in format used by PCIe Slot Capabilities Register.
 *
 * If the property is not found or is invalid, returns 0.
 */
u32 of_pci_get_slot_power_limit(struct device_node *node,
				u8 *slot_power_limit_value,
				u8 *slot_power_limit_scale)
{
	u32 slot_power_limit_mw;
	u8 value, scale;

	if (of_property_read_u32(node, "slot-power-limit-milliwatt",
				 &slot_power_limit_mw))
		slot_power_limit_mw = 0;

	/* Calculate Slot Power Limit Value and Slot Power Limit Scale */
	if (slot_power_limit_mw == 0) {
		value = 0x00;
		scale = 0;
	} else if (slot_power_limit_mw <= 255) {
		value = slot_power_limit_mw;
		scale = 3;
	} else if (slot_power_limit_mw <= 255*10) {
		value = slot_power_limit_mw / 10;
		scale = 2;
		slot_power_limit_mw = slot_power_limit_mw / 10 * 10;
	} else if (slot_power_limit_mw <= 255*100) {
		value = slot_power_limit_mw / 100;
		scale = 1;
		slot_power_limit_mw = slot_power_limit_mw / 100 * 100;
	} else if (slot_power_limit_mw <= 239*1000) {
		value = slot_power_limit_mw / 1000;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000;
	} else if (slot_power_limit_mw < 250*1000) {
		value = 0xEF;
		scale = 0;
		slot_power_limit_mw = 239*1000;
	} else if (slot_power_limit_mw <= 600*1000) {
		value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25);
	} else {
		value = 0xFE;
		scale = 0;
		slot_power_limit_mw = 600*1000;
	}

	if (slot_power_limit_value)
		*slot_power_limit_value = value;

	if (slot_power_limit_scale)
		*slot_power_limit_scale = scale;

	return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
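
/*
 * Example: "slot-power-limit-milliwatt = <25000>;" (25 W) maps to Slot Power
 * Limit Value 250 with Slot Power Limit Scale 1 (0.1x), i.e. 250 * 0.1 W =
 * 25 W, and the function returns 25000.
 */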