// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 *
 *  Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
 *
 *  Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
 *  Grant Likely.
 */

#define pr_fmt(fmt)	"OF: " fmt

#include <linux/cleanup.h>
#include <linux/console.h>
#include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/proc_fs.h>

#include "of_private.h"

LIST_HEAD(aliases_lookup);

struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;
EXPORT_SYMBOL(of_chosen);
struct device_node *of_aliases;
struct device_node *of_stdout;
static const char *of_stdout_options;

struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;

	return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
EXPORT_SYMBOL(of_node_name_eq);
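
/*
 * Usage sketch for of_node_name_eq(): the unit address (anything from '@'
 * onwards) is ignored, so a node named "serial@fe001000" compares equal to
 * "serial". The node path below is hypothetical.
 *
 *	struct device_node *np = of_find_node_by_path("/soc/serial@fe001000");
 *
 *	if (of_node_name_eq(np, "serial"))
 *		pr_debug("found a serial node\n");
 *	of_node_put(np);
 */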

bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
	if (!np)
		return false;

	return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
EXPORT_SYMBOL(of_node_name_prefix);

static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match = __of_get_property(np, "device_type", NULL);

	return np && match && type && !strcmp(match, type);
}

int of_bus_n_addr_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent)
		if (!of_property_read_u32(np, "#address-cells", &cells))
			return cells;

	/* No #address-cells property for the root node */
	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
}

int of_n_addr_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_addr_cells(np);
}
EXPORT_SYMBOL(of_n_addr_cells);

int of_bus_n_size_cells(struct device_node *np)
{
	u32 cells;

	for (; np; np = np->parent)
		if (!of_property_read_u32(np, "#size-cells", &cells))
			return cells;

	/* No #size-cells property for the root node */
	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
}

int of_n_size_cells(struct device_node *np)
{
	if (np->parent)
		np = np->parent;

	return of_bus_n_size_cells(np);
}
EXPORT_SYMBOL(of_n_size_cells);
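
/*
 * Illustrative sketch of how the cell counts above are typically consumed
 * when parsing a "reg" property by hand (the node is hypothetical); most
 * callers should prefer the of_address helpers instead.
 *
 *	struct device_node *np;	// some child node carrying a "reg" property
 *	int na = of_n_addr_cells(np);	// #address-cells of the parent bus
 *	int ns = of_n_size_cells(np);	// #size-cells of the parent bus
 *	// each "reg" entry then spans (na + ns) 32-bit cells
 */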

#ifdef CONFIG_NUMA
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif

#define OF_PHANDLE_CACHE_BITS	7
#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];

static u32 of_phandle_cache_hash(phandle handle)
{
	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
}

/*
 * Caller must hold devtree_lock.
 */
void __of_phandle_cache_inv_entry(phandle handle)
{
	u32 handle_hash;
	struct device_node *np;

	if (!handle)
		return;

	handle_hash = of_phandle_cache_hash(handle);

	np = phandle_cache[handle_hash];
	if (np && handle == np->phandle)
		phandle_cache[handle_hash] = NULL;
}

void __init of_core_init(void)
{
	struct device_node *np;

	of_platform_register_reconfig_notifier();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np) {
		__of_attach_node_sysfs(np);
		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
	}
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}

static struct property *__of_find_property(const struct device_node *np,
					   const char *name, int *lenp)
{
	struct property *pp;

	if (!np)
		return NULL;

	for (pp = np->properties; pp; pp = pp->next) {
		if (of_prop_cmp(pp->name, name) == 0) {
			if (lenp)
				*lenp = pp->length;
			break;
		}
	}

	return pp;
}

struct property *of_find_property(const struct device_node *np,
				  const char *name,
				  int *lenp)
{
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	pp = __of_find_property(np, name, lenp);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);

	return pp;
}
EXPORT_SYMBOL(of_find_property);

struct device_node *__of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	if (!prev) {
		np = of_root;
	} else if (prev->child) {
		np = prev->child;
	} else {
		/* Walk back up looking for a sibling, or the end of the structure */
		np = prev;
		while (np->parent && !np->sibling)
			np = np->parent;
		np = np->sibling; /* Might be null at the end of the tree */
	}
	return np;
}

/**
 * of_find_all_nodes - Get next node in global list
 * @prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_all_nodes(struct device_node *prev)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = __of_find_all_nodes(prev);
	of_node_get(np);
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_all_nodes);

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *__of_get_property(const struct device_node *np,
			      const char *name, int *lenp)
{
	struct property *pp = __of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}

/*
 * Find a property with a given name for a given node
 * and return the value.
 */
const void *of_get_property(const struct device_node *np, const char *name,
			    int *lenp)
{
	struct property *pp = of_find_property(np, name, lenp);

	return pp ? pp->value : NULL;
}
EXPORT_SYMBOL(of_get_property);
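
/*
 * Minimal sketch of reading a raw property value (the property name is
 * hypothetical). The returned pointer aliases the live property data and
 * must not be modified or freed by the caller.
 *
 *	int len;
 *	const void *val = of_get_property(np, "vendor,example-blob", &len);
 *
 *	if (val)
 *		pr_debug("property is %d bytes long\n", len);
 */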

/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!__of_node_is_type(device, type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!of_node_name_eq(device, name))
			return 0;
		score++;
	}

	return score;
}

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	unsigned long flags;
	int res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_compatible(device, compat, NULL, NULL);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;
}
EXPORT_SYMBOL(of_device_is_compatible);
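
/*
 * Usage sketch for of_device_is_compatible(); the compatible string is
 * hypothetical. A non-zero return is a match score rather than a plain
 * boolean, so scores for several candidate strings can be compared.
 *
 *	if (of_device_is_compatible(np, "acme,uart-v2"))
 *		pr_debug("%pOF claims compatibility with acme,uart-v2\n", np);
 */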

/** Checks if the device is compatible with any of the entries in
 *  a NULL terminated array of strings. Returns the best match
 *  score or 0.
 */
int of_device_compatible_match(const struct device_node *device,
			       const char *const *compat)
{
	unsigned int tmp, score = 0;

	if (!compat)
		return 0;

	while (*compat) {
		tmp = of_device_is_compatible(device, *compat);
		if (tmp > score)
			score = tmp;
		compat++;
	}

	return score;
}
EXPORT_SYMBOL_GPL(of_device_compatible_match);

/**
 * of_machine_compatible_match - Test root of device tree against a compatible array
 * @compats: NULL terminated array of compatible strings to look for in root node's compatible property.
 *
 * Returns true if the root node has any of the given compatible values in its
 * compatible property.
 */
bool of_machine_compatible_match(const char *const *compats)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_compatible_match(root, compats);
		of_node_put(root);
	}

	return rc != 0;
}
EXPORT_SYMBOL(of_machine_compatible_match);

static bool __of_device_is_status(const struct device_node *device,
				  const char * const*strings)
{
	const char *status;
	int statlen;

	if (!device)
		return false;

	status = __of_get_property(device, "status", &statlen);
	if (status == NULL)
		return false;

	if (statlen > 0) {
		while (*strings) {
			unsigned int len = strlen(*strings);

			if ((*strings)[len - 1] == '-') {
				if (!strncmp(status, *strings, len))
					return true;
			} else {
				if (!strcmp(status, *strings))
					return true;
			}
			strings++;
		}
	}

	return false;
}

/**
 *  __of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability, with locks already held
 *
 *  Return: True if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
static bool __of_device_is_available(const struct device_node *device)
{
	static const char * const ok[] = {"okay", "ok", NULL};

	if (!device)
		return false;

	return !__of_get_property(device, "status", NULL) ||
		__of_device_is_status(device, ok);
}

/**
 *  __of_device_is_reserved - check if a device is reserved
 *
 *  @device: Node to check for availability, with locks already held
 *
 *  Return: True if the status property is set to "reserved", false otherwise
 */
static bool __of_device_is_reserved(const struct device_node *device)
{
	static const char * const reserved[] = {"reserved", NULL};

	return __of_device_is_status(device, reserved);
}

/**
 *  of_device_is_available - check if a device is available for use
 *
 *  @device: Node to check for availability
 *
 *  Return: True if the status property is absent or set to "okay" or "ok",
 *  false otherwise
 */
bool of_device_is_available(const struct device_node *device)
{
	unsigned long flags;
	bool res;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	res = __of_device_is_available(device);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return res;

}
EXPORT_SYMBOL(of_device_is_available);
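
/*
 * Usage sketch: skip devices that firmware marked as disabled. In device
 * tree source this corresponds to a node carrying, for example,
 * 'status = "disabled";' (the node contents are hypothetical).
 *
 *	if (!of_device_is_available(np))
 *		return -ENODEV;	// node exists but must not be used
 */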

/**
 *  __of_device_is_fail - check if a device has status "fail" or "fail-..."
 *
 *  @device: Node to check status for, with locks already held
 *
 *  Return: True if the status property is set to "fail" or "fail-..." (for any
 *  error code suffix), false otherwise
 */
static bool __of_device_is_fail(const struct device_node *device)
{
	static const char * const fail[] = {"fail", "fail-", NULL};

	return __of_device_is_status(device, fail);
}

/**
 *  of_device_is_big_endian - check if a device has BE registers
 *
 *  @device: Node to check for endianness
 *
 *  Return: True if the device has a "big-endian" property, or if the kernel
 *  was compiled for BE *and* the device has a "native-endian" property.
 *  Returns false otherwise.
 *
 *  Callers would nominally use ioread32be/iowrite32be if
 *  of_device_is_big_endian() == true, or readl/writel otherwise.
 */
bool of_device_is_big_endian(const struct device_node *device)
{
	if (of_property_read_bool(device, "big-endian"))
		return true;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
	    of_property_read_bool(device, "native-endian"))
		return true;
	return false;
}
EXPORT_SYMBOL(of_device_is_big_endian);

/**
 * of_get_parent - Get a node's parent if any
 * @node:	Node to get parent
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_parent(const struct device_node *node)
{
	struct device_node *np;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	np = of_node_get(node->parent);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_get_parent);

/**
 * of_get_next_parent - Iterate to a node's parent
 * @node:	Node to get parent of
 *
 * This is like of_get_parent() except that it drops the
 * refcount on the passed node, making it suitable for iterating
 * through a node's parents.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_get_next_parent(struct device_node *node)
{
	struct device_node *parent;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	parent = of_node_get(node->parent);
	of_node_put(node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return parent;
}
EXPORT_SYMBOL(of_get_next_parent);

static struct device_node *__of_get_next_child(const struct device_node *node,
						struct device_node *prev)
{
	struct device_node *next;

	if (!node)
		return NULL;

	next = prev ? prev->sibling : node->child;
	of_node_get(next);
	of_node_put(prev);
	return next;
}
#define __for_each_child_of_node(parent, child) \
	for (child = __of_get_next_child(parent, NULL); child != NULL; \
	     child = __of_get_next_child(parent, child))

/**
 * of_get_next_child - Iterate over a node's children
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * Return: A node pointer with refcount incremented, use of_node_put() on
 * it when done. Returns NULL when prev is the last child. Decrements the
 * refcount of prev.
 */
struct device_node *of_get_next_child(const struct device_node *node,
	struct device_node *prev)
{
	struct device_node *next;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = __of_get_next_child(node, prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_child);

static struct device_node *of_get_next_status_child(const struct device_node *node,
						    struct device_node *prev,
						    bool (*checker)(const struct device_node *))
{
	struct device_node *next;
	unsigned long flags;

	if (!node)
		return NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	next = prev ? prev->sibling : node->child;
	for (; next; next = next->sibling) {
		if (!checker(next))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}

/**
 * of_get_next_available_child - Find the next available child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it
 * automatically skips any disabled nodes (i.e. status = "disabled").
 */
struct device_node *of_get_next_available_child(const struct device_node *node,
	struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_available);
}
EXPORT_SYMBOL(of_get_next_available_child);
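
/*
 * Iteration sketch: of_get_next_available_child() is normally used through
 * the for_each_available_child_of_node() helper from <linux/of.h>, which
 * hides the get/put handshake (the parent/child nodes are hypothetical).
 *
 *	struct device_node *child;
 *
 *	for_each_available_child_of_node(parent, child) {
 *		// a reference on 'child' is held for the body of the loop
 *		pr_debug("usable child: %pOF\n", child);
 *	}
 */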

/**
 * of_get_next_reserved_child - Find the next reserved child node
 * @node:	parent node
 * @prev:	previous child of the parent node, or NULL to get first
 *
 * This function is like of_get_next_child(), except that it returns only
 * child nodes whose status property is set to "reserved".
 */
struct device_node *of_get_next_reserved_child(const struct device_node *node,
						struct device_node *prev)
{
	return of_get_next_status_child(node, prev, __of_device_is_reserved);
}
EXPORT_SYMBOL(of_get_next_reserved_child);

/**
 * of_get_next_cpu_node - Iterate on cpu nodes
 * @prev:	previous child of the /cpus node, or NULL to get first
 *
 * Unusable CPUs (those with the status property set to "fail" or "fail-...")
 * will be skipped.
 *
 * Return: A cpu node pointer with refcount incremented, use of_node_put()
 * on it when done. Returns NULL when prev is the last child. Decrements
 * the refcount of prev.
 */
struct device_node *of_get_next_cpu_node(struct device_node *prev)
{
	struct device_node *next = NULL;
	unsigned long flags;
	struct device_node *node;

	if (!prev)
		node = of_find_node_by_path("/cpus");

	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (prev)
		next = prev->sibling;
	else if (node) {
		next = node->child;
		of_node_put(node);
	}
	for (; next; next = next->sibling) {
		if (__of_device_is_fail(next))
			continue;
		if (!(of_node_name_eq(next, "cpu") ||
		      __of_node_is_type(next, "cpu")))
			continue;
		if (of_node_get(next))
			break;
	}
	of_node_put(prev);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return next;
}
EXPORT_SYMBOL(of_get_next_cpu_node);

/**
 * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
 *
 * Lookup child node whose compatible property contains the given compatible
 * string.
 *
 * Return: a node pointer with refcount incremented, use of_node_put() on it
 * when done; or NULL if not found.
 */
struct device_node *of_get_compatible_child(const struct device_node *parent,
				const char *compatible)
{
	struct device_node *child;

	for_each_child_of_node(parent, child) {
		if (of_device_is_compatible(child, compatible))
			break;
	}

	return child;
}
EXPORT_SYMBOL(of_get_compatible_child);

/**
 * of_get_child_by_name - Find the child node by name for a given parent
 * @node:	parent node
 * @name:	child name to look for.
 *
 * This function looks for a child node with the given name.
 *
 * Return: A node pointer if found, with refcount incremented, use
 * of_node_put() on it when done.
 * Returns NULL if node is not found.
 */
struct device_node *of_get_child_by_name(const struct device_node *node,
				const char *name)
{
	struct device_node *child;

	for_each_child_of_node(node, child)
		if (of_node_name_eq(child, name))
			break;
	return child;
}
EXPORT_SYMBOL(of_get_child_by_name);
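
/*
 * Usage sketch for of_get_child_by_name(); the child name is hypothetical.
 * The returned node, if any, must be released with of_node_put().
 *
 *	struct device_node *ports = of_get_child_by_name(np, "ports");
 *
 *	if (ports) {
 *		// ... inspect the subnode ...
 *		of_node_put(ports);
 *	}
 */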

struct device_node *__of_find_node_by_path(struct device_node *parent,
						const char *path)
{
	struct device_node *child;
	int len;

	len = strcspn(path, "/:");
	if (!len)
		return NULL;

	__for_each_child_of_node(parent, child) {
		const char *name = kbasename(child->full_name);
		if (strncmp(path, name, len) == 0 && (strlen(name) == len))
			return child;
	}
	return NULL;
}

struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		if (separator && separator < path)
			break;
	}
	return node;
}

/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *       start with '/', the name of a property of the /aliases
 *       node (an alias).  In the case of an alias, the node
 *       matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *       an options string appended to the end of the path with
 *       a ':' separator.
 *
 * Valid paths:
 *  * /foo/bar	Full path
 *  * foo	Valid alias
 *  * foo/bar	Valid alias + relative path
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
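
/*
 * Usage sketch for of_find_node_opts_by_path(): a path that does not start
 * with '/' is first resolved through /aliases, and anything after a ':' is
 * handed back as an option string. The alias and options below are
 * hypothetical (the pattern matches how stdout-path is commonly written).
 *
 *	const char *opts;
 *	struct device_node *np = of_find_node_opts_by_path("serial0:115200n8", &opts);
 *
 *	// np -> node referenced by the "serial0" alias, opts -> "115200n8"
 *	of_node_put(np);
 */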

/**
 * of_find_node_by_name - Find a node by its "name" property
 * @from:	The node to start searching from or NULL; the node
 *		you pass will not be searched, only the next one
 *		will. Typically, you pass what the previous call
 *		returned. of_node_put() will be called on @from.
 * @name:	The name string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (of_node_name_eq(np, name) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 * of_find_node_by_type - Find a node by its "device_type" property
 * @from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 * @type:	The type string to match against
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_node_is_type(np, type) && of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 * of_find_compatible_node - Find a node based on type and one of the
 *                                tokens in its "compatible" property
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @type:	The type string to match "device_type" or NULL to ignore
 * @compatible:	The string to match to one of the tokens in the device
 *		"compatible" list.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np)
		if (__of_device_is_compatible(np, compatible, type, NULL) &&
		    of_node_get(np))
			break;
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

/**
 * of_find_node_with_property - Find a node which has a property with
 *                              the given name.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @prop_name:	The name of the property to look for.
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_with_property(struct device_node *from,
	const char *prop_name)
{
	struct device_node *np;
	struct property *pp;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		for (pp = np->properties; pp; pp = pp->next) {
			if (of_prop_cmp(pp->name, prop_name) == 0) {
				of_node_get(np);
				goto out;
			}
		}
	}
out:
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_with_property);

static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
					   const struct device_node *node)
{
	const struct of_device_id *best_match = NULL;
	int score, best_score = 0;

	if (!matches)
		return NULL;

	for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
		score = __of_device_is_compatible(node, matches->compatible,
						  matches->type, matches->name);
		if (score > best_score) {
			best_match = matches;
			best_score = score;
		}
	}

	return best_match;
}

/**
 * of_match_node - Tell if a device_node has a matching of_match structure
 * @matches:	array of of device match structures to search in
 * @node:	the of device structure to match against
 *
 * Low level utility function used by device matching.
 */
const struct of_device_id *of_match_node(const struct of_device_id *matches,
					 const struct device_node *node)
{
	const struct of_device_id *match;
	unsigned long flags;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	match = __of_match_node(matches, node);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return match;
}
EXPORT_SYMBOL(of_match_node);
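
/*
 * Usage sketch for of_match_node(); the compatible strings and driver data
 * are hypothetical. The best-scoring entry of the table is returned, or
 * NULL if nothing matches.
 *
 *	static const struct of_device_id acme_ids[] = {
 *		{ .compatible = "acme,uart-v2", .data = (void *)2 },
 *		{ .compatible = "acme,uart",    .data = (void *)1 },
 *		{ }
 *	};
 *
 *	const struct of_device_id *id = of_match_node(acme_ids, np);
 *
 *	if (id)
 *		pr_debug("matched %s\n", id->compatible);
 */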

/**
 * of_find_matching_node_and_match - Find a node based on an of_device_id
 *				     match table.
 * @from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 * @matches:	array of of device match structures to search in
 * @match:	Updated to point at the matches entry which matched
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_matching_node_and_match(struct device_node *from,
					const struct of_device_id *matches,
					const struct of_device_id **match)
{
	struct device_node *np;
	const struct of_device_id *m;
	unsigned long flags;

	if (match)
		*match = NULL;

	raw_spin_lock_irqsave(&devtree_lock, flags);
	for_each_of_allnodes_from(from, np) {
		m = __of_match_node(matches, np);
		if (m && of_node_get(np)) {
			if (match)
				*match = m;
			break;
		}
	}
	of_node_put(from);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_matching_node_and_match);

/**
 * of_alias_from_compatible - Lookup appropriate alias for a device node
 *			      depending on compatible
 * @node:	pointer to a device tree node
 * @alias:	Pointer to buffer that alias value will be copied into
 * @len:	Length of alias value
 *
 * Based on the value of the compatible property, this routine will attempt
 * to choose an appropriate alias value for a particular device tree node.
 * It does this by stripping the manufacturer prefix (as delimited by a ',')
 * from the first entry in the compatible list property.
 *
 * Note: The matching on just the "product" side of the compatible is a relic
 * from I2C and SPI. Please do not add any new user.
 *
 * Return: This routine returns 0 on success, <0 on failure.
 */
int of_alias_from_compatible(const struct device_node *node, char *alias, int len)
{
	const char *compatible, *p;
	int cplen;

	compatible = of_get_property(node, "compatible", &cplen);
	if (!compatible || strlen(compatible) > cplen)
		return -ENODEV;
	p = strchr(compatible, ',');
	strscpy(alias, p ? p + 1 : compatible, len);
	return 0;
}
EXPORT_SYMBOL_GPL(of_alias_from_compatible);

/**
 * of_find_node_by_phandle - Find a node given a phandle
 * @handle:	phandle of the node to find
 *
 * Return: A node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np = NULL;
	unsigned long flags;
	u32 handle_hash;

	if (!handle)
		return NULL;

	handle_hash = of_phandle_cache_hash(handle);

	raw_spin_lock_irqsave(&devtree_lock, flags);

	if (phandle_cache[handle_hash] &&
	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];

	if (!np) {
		for_each_of_allnodes(np)
			if (np->phandle == handle &&
			    !of_node_check_flag(np, OF_DETACHED)) {
				phandle_cache[handle_hash] = np;
				break;
			}
	}

	of_node_get(np);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
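
/*
 * Lookup sketch: a phandle read out of a property (the property name
 * "vendor,companion" is hypothetical) can be turned back into a node as
 * follows. The phandle cache above keeps repeated lookups of hot phandles
 * from walking the whole tree.
 *
 *	u32 ph;
 *
 *	if (!of_property_read_u32(np, "vendor,companion", &ph)) {
 *		struct device_node *other = of_find_node_by_phandle(ph);
 *		// ... use 'other' ...
 *		of_node_put(other);
 *	}
 */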

void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
{
	int i;
	printk("%s %pOF", msg, args->np);
	for (i = 0; i < args->args_count; i++) {
		const char delim = i ? ',' : ':';

		pr_cont("%c%08x", delim, args->args[i]);
	}
	pr_cont("\n");
}

int of_phandle_iterator_init(struct of_phandle_iterator *it,
		const struct device_node *np,
		const char *list_name,
		const char *cells_name,
		int cell_count)
{
	const __be32 *list;
	int size;

	memset(it, 0, sizeof(*it));

	/*
	 * one of cell_count or cells_name must be provided to determine the
	 * argument length.
	 */
	if (cell_count < 0 && !cells_name)
		return -EINVAL;

	list = of_get_property(np, list_name, &size);
	if (!list)
		return -ENOENT;

	it->cells_name = cells_name;
	it->cell_count = cell_count;
	it->parent = np;
	it->list_end = list + size / sizeof(*list);
	it->phandle_end = list;
	it->cur = list;

	return 0;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_init);

int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle %d\n",
				       it->parent, it->phandle);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				/*
				 * If both cell_count and cells_name are given,
				 * fall back to cell_count in absence
				 * of the cells_name property
				 */
				if (it->cell_count >= 0) {
					count = it->cell_count;
				} else {
					pr_err("%pOF: could not get %s for %pOF\n",
					       it->parent,
					       it->cells_name,
					       it->node);
					goto err;
				}
			}
		} else {
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			if (it->cells_name)
				pr_err("%pOF: %s = %d found %td\n",
					it->parent, it->cells_name,
					count, it->list_end - it->cur);
			else
				pr_err("%pOF: phandle %s needs %d, found %td\n",
					it->parent, of_node_full_name(it->node),
					count, it->list_end - it->cur);
			goto err;
		}
	}

	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);

int of_phandle_iterator_args(struct of_phandle_iterator *it,
			     uint32_t *args,
			     int size)
{
	int i, count;

	count = it->cur_count;

	if (WARN_ON(size < count))
		count = size;

	for (i = 0; i < count; i++)
		args[i] = be32_to_cpup(it->cur++);

	return count;
}

int __of_parse_phandle_with_args(const struct device_node *np,
				 const char *list_name,
				 const char *cells_name,
				 int cell_count, int index,
				 struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	if (index < 0)
		return -EINVAL;

	/* Loop over the phandles until the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}
EXPORT_SYMBOL(__of_parse_phandle_with_args);
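
/*
 * Usage sketch: callers normally go through of_parse_phandle_with_args()
 * from <linux/of.h>, which wraps this helper. For a hypothetical property
 * 'resets = <&rst 7>;' whose provider node carries '#reset-cells = <1>;':
 *
 *	struct of_phandle_args args;
 *
 *	if (!of_parse_phandle_with_args(np, "resets", "#reset-cells", 0, &args)) {
 *		// args.np -> provider node, args.args[0] == 7
 *		of_node_put(args.np);
 *	}
 */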

/**
 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @stem_name:	stem of property names that specify phandles' arguments count
 * @index:	index of a phandle to parse out
 * @out_args:	optional pointer to output arguments structure (will be filled)
 *
 * This function is useful to parse lists of phandles and their arguments.
 * Returns 0 on success and fills out_args, on error returns appropriate errno
 * value. The difference between this function and of_parse_phandle_with_args()
 * is that this API remaps a phandle if the node the phandle points to has
 * a <@stem_name>-map property.
 *
 * Caller is responsible to call of_node_put() on the returned out_args->np
 * pointer.
 *
 * Example::
 *
 *  phandle1: node1 {
 *  	#list-cells = <2>;
 *  };
 *
 *  phandle2: node2 {
 *  	#list-cells = <1>;
 *  };
 *
 *  phandle3: node3 {
 *  	#list-cells = <1>;
 *  	list-map = <0 &phandle2 3>,
 *  		   <1 &phandle2 2>,
 *  		   <2 &phandle1 5 1>;
 *  	list-map-mask = <0x3>;
 *  };
 *
 *  node4 {
 *  	list = <&phandle1 1 2 &phandle3 0>;
 *  };
 *
 * To get a device_node of the ``node2`` node you may call this:
 * of_parse_phandle_with_args_map(node4, "list", "list", 1, &args);
 */
int of_parse_phandle_with_args_map(const struct device_node *np,
				   const char *list_name,
				   const char *stem_name,
				   int index, struct of_phandle_args *out_args)
{
	char *cells_name __free(kfree) = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
	char *map_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map", stem_name);
	char *mask_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
	char *pass_name __free(kfree) = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
	struct device_node *cur, *new = NULL;
	const __be32 *map, *mask, *pass;
	static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(~0) };
	static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = cpu_to_be32(0) };
	__be32 initial_match_array[MAX_PHANDLE_ARGS];
	const __be32 *match_array = initial_match_array;
	int i, ret, map_len, match;
	u32 list_size, new_size;

	if (index < 0)
		return -EINVAL;

	if (!cells_name || !map_name || !mask_name || !pass_name)
		return -ENOMEM;

	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
					   out_args);
	if (ret)
		return ret;

	/* Get the #<list>-cells property */
	cur = out_args->np;
	ret = of_property_read_u32(cur, cells_name, &list_size);
	if (ret < 0)
		goto put;

	/* Precalculate the match array - this simplifies match loop */
	for (i = 0; i < list_size; i++)
		initial_match_array[i] = cpu_to_be32(out_args->args[i]);

	ret = -EINVAL;
	while (cur) {
		/* Get the <list>-map property */
		map = of_get_property(cur, map_name, &map_len);
		if (!map) {
			return 0;
		}
		map_len /= sizeof(u32);

		/* Get the <list>-map-mask property (optional) */
		mask = of_get_property(cur, mask_name, NULL);
		if (!mask)
			mask = dummy_mask;
		/* Iterate through <list>-map property */
		match = 0;
		while (map_len > (list_size + 1) && !match) {
			/* Compare specifiers */
			match = 1;
			for (i = 0; i < list_size; i++, map_len--)
				match &= !((match_array[i] ^ *map++) & mask[i]);

			of_node_put(new);
			new = of_find_node_by_phandle(be32_to_cpup(map));
			map++;
			map_len--;

			/* Check if not found */
			if (!new)
				goto put;

			if (!of_device_is_available(new))
				match = 0;

			ret = of_property_read_u32(new, cells_name, &new_size);
			if (ret)
				goto put;

			/* Check for malformed properties */
			if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
				goto put;
			if (map_len < new_size)
				goto put;

			/* Move forward by new node's #<list>-cells amount */
			map += new_size;
			map_len -= new_size;
		}
		if (!match)
			goto put;

		/* Get the <list>-map-pass-thru property (optional) */
		pass = of_get_property(cur, pass_name, NULL);
		if (!pass)
			pass = dummy_pass;

		/*
		 * Successfully parsed a <list>-map translation; copy new
		 * specifier into the out_args structure, keeping the
		 * bits specified in <list>-map-pass-thru.
		 */
		match_array = map - new_size;
		for (i = 0; i < new_size; i++) {
			__be32 val = *(map - new_size + i);

			if (i < list_size) {
				val &= ~pass[i];
				val |= cpu_to_be32(out_args->args[i]) & pass[i];
			}

			out_args->args[i] = be32_to_cpu(val);
		}
		out_args->args_count = list_size = new_size;
		/* Iterate again with new provider */
		out_args->np = new;
		of_node_put(cur);
		cur = new;
		new = NULL;
	}
put:
	of_node_put(cur);
	of_node_put(new);
	return ret;
}
EXPORT_SYMBOL(of_parse_phandle_with_args_map);

/**
 * of_count_phandle_with_args() - Find the number of phandle references in a property
 * @np:		pointer to a device tree node containing a list
 * @list_name:	property name that contains a list
 * @cells_name:	property name that specifies phandles' arguments count
 *
 * Return: The number of phandle + argument tuples within a property. It
 * is a typical pattern to encode a list of phandle and variable
 * arguments into a single property. The number of arguments is encoded
 * by a property in the phandle-target node. For example, a gpios
 * property would contain a list of GPIO specifiers consisting of a
 * phandle and 1 or more arguments. The number of arguments is
 * determined by the #gpio-cells property in the node pointed to by the
 * phandle.
 */
int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
				const char *cells_name)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/*
	 * If cells_name is NULL we assume a cell count of 0. This makes
	 * counting the phandles trivial as each 32bit word in the list is a
	 * phandle and there are no arguments to consider. So we don't iterate through
	 * the list but just use the length to determine the phandle count.
	 */
	if (!cells_name) {
		const __be32 *list;
		int size;

		list = of_get_property(np, list_name, &size);
		if (!list)
			return -ENOENT;

		return size / sizeof(*list);
	}

	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
	if (rc)
		return rc;

	while ((rc = of_phandle_iterator_next(&it)) == 0)
		cur_index += 1;

	if (rc != -ENOENT)
		return rc;

	return cur_index;
}
EXPORT_SYMBOL(of_count_phandle_with_args);
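
/*
 * Counting sketch for a hypothetical property 'clocks = <&pll 1>, <&osc>;'
 * where each referenced provider node carries a #clock-cells property:
 *
 *	int n = of_count_phandle_with_args(np, "clocks", "#clock-cells");
 *	// n == 2 on success, or a negative errno (e.g. -ENOENT if absent)
 */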

static struct property *__of_remove_property_from_list(struct property **list, struct property *prop)
{
	struct property **next;

	for (next = list; *next; next = &(*next)->next) {
		if (*next == prop) {
			*next = prop->next;
			prop->next = NULL;
			return prop;
		}
	}
	return NULL;
}

/**
 * __of_add_property - Add a property to a node without lock operations
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int __of_add_property(struct device_node *np, struct property *prop)
{
	int rc = 0;
	unsigned long flags;
	struct property **next;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	__of_remove_property_from_list(&np->deadprops, prop);

	prop->next = NULL;
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			rc = -EEXIST;
			goto out_unlock;
		}
		next = &(*next)->next;
	}
	*next = prop;

out_unlock:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	if (rc)
		return rc;

	__of_add_property_sysfs(np, prop);
	return 0;
}

/**
 * of_add_property - Add a property to a node
 * @np:		Caller's Device Node
 * @prop:	Property to add
 */
int of_add_property(struct device_node *np, struct property *prop)
{
	int rc;

	mutex_lock(&of_mutex);
	rc = __of_add_property(np, prop);
	mutex_unlock(&of_mutex);

	if (!rc)
		of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);

	return rc;
}
EXPORT_SYMBOL_GPL(of_add_property);
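
/*
 * Sketch of adding a property at runtime (name and value are hypothetical).
 * The property structure and its value must stay allocated for as long as
 * the node may reference them.
 *
 *	static __be32 example_val = cpu_to_be32(1);
 *	static struct property example_prop = {
 *		.name	= "vendor,example",
 *		.value	= &example_val,
 *		.length	= sizeof(example_val),
 *	};
 *
 *	int err = of_add_property(np, &example_prop);
 */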
1637  
__of_remove_property(struct device_node * np,struct property * prop)1638  int __of_remove_property(struct device_node *np, struct property *prop)
1639  {
1640  	unsigned long flags;
1641  	int rc = -ENODEV;
1642  
1643  	raw_spin_lock_irqsave(&devtree_lock, flags);
1644  
1645  	if (__of_remove_property_from_list(&np->properties, prop)) {
1646  		/* Found the property, add it to deadprops list */
1647  		prop->next = np->deadprops;
1648  		np->deadprops = prop;
1649  		rc = 0;
1650  	}
1651  
1652  	raw_spin_unlock_irqrestore(&devtree_lock, flags);
1653  	if (rc)
1654  		return rc;
1655  
1656  	__of_remove_property_sysfs(np, prop);
1657  	return 0;
1658  }
1659  
1660  /**
1661   * of_remove_property - Remove a property from a node.
1662   * @np:		Caller's Device Node
1663   * @prop:	Property to remove
1664   *
1665   * Note that we don't actually remove it, since we have given out
1666   * who-knows-how-many pointers to the data using get-property.
1667   * Instead we just move the property to the "dead properties"
1668   * list, so it won't be found any more.
1669   */
of_remove_property(struct device_node * np,struct property * prop)1670  int of_remove_property(struct device_node *np, struct property *prop)
1671  {
1672  	int rc;
1673  
1674  	if (!prop)
1675  		return -ENODEV;
1676  
1677  	mutex_lock(&of_mutex);
1678  	rc = __of_remove_property(np, prop);
1679  	mutex_unlock(&of_mutex);
1680  
1681  	if (!rc)
1682  		of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
1683  
1684  	return rc;
1685  }
1686  EXPORT_SYMBOL_GPL(of_remove_property);
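
/*
 * Example (illustrative only): removing a property that was previously
 * looked up on a node; the property name is made up for the sketch.
 *
 *	struct property *prop = of_find_property(np, "example,enabled", NULL);
 *
 *	if (prop && of_remove_property(np, prop))
 *		pr_warn("failed to remove property\n");
 *
 * The property is only moved to the deadprops list, so pointers previously
 * handed out via of_get_property() stay valid.
 */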
1687  
1688  int __of_update_property(struct device_node *np, struct property *newprop,
1689  		struct property **oldpropp)
1690  {
1691  	struct property **next, *oldprop;
1692  	unsigned long flags;
1693  
1694  	raw_spin_lock_irqsave(&devtree_lock, flags);
1695  
1696  	__of_remove_property_from_list(&np->deadprops, newprop);
1697  
1698  	for (next = &np->properties; *next; next = &(*next)->next) {
1699  		if (of_prop_cmp((*next)->name, newprop->name) == 0)
1700  			break;
1701  	}
1702  	*oldpropp = oldprop = *next;
1703  
1704  	if (oldprop) {
1705  		/* replace the existing property */
1706  		newprop->next = oldprop->next;
1707  		*next = newprop;
1708  		oldprop->next = np->deadprops;
1709  		np->deadprops = oldprop;
1710  	} else {
1711  		/* no existing property, append the new one */
1712  		newprop->next = NULL;
1713  		*next = newprop;
1714  	}
1715  
1716  	raw_spin_unlock_irqrestore(&devtree_lock, flags);
1717  
1718  	__of_update_property_sysfs(np, newprop, oldprop);
1719  
1720  	return 0;
1721  }
1722  
1723  /*
1724   * of_update_property - Update a property in a node; if the property does
1725   * not exist, add it.
1726   *
1727   * Note that we don't actually remove the old property, since we may have
1728   * given out who-knows-how-many pointers to its data using get-property.
1729   * Instead we just move the old property to the "dead properties" list
1730   * and add the new property to the property list.
1731   */
1732  int of_update_property(struct device_node *np, struct property *newprop)
1733  {
1734  	struct property *oldprop;
1735  	int rc;
1736  
1737  	if (!newprop->name)
1738  		return -EINVAL;
1739  
1740  	mutex_lock(&of_mutex);
1741  	rc = __of_update_property(np, newprop, &oldprop);
1742  	mutex_unlock(&of_mutex);
1743  
1744  	if (!rc)
1745  		of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);
1746  
1747  	return rc;
1748  }
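
/*
 * Example (illustrative only): replacing the value of a property in a single
 * step; the property chosen here is just for the sketch.
 *
 *	static struct property status_okay = {
 *		.name	= "status",
 *		.value	= "okay",
 *		.length	= sizeof("okay"),
 *	};
 *
 *	if (of_update_property(np, &status_okay))
 *		pr_warn("failed to update status\n");
 *
 * Any previous "status" property ends up on the deadprops list rather than
 * being freed, for the same reason given for of_remove_property().
 */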
1749  
1750  static void of_alias_add(struct alias_prop *ap, struct device_node *np,
1751  			 int id, const char *stem, int stem_len)
1752  {
1753  	ap->np = np;
1754  	ap->id = id;
1755  	strscpy(ap->stem, stem, stem_len + 1);
1756  	list_add_tail(&ap->link, &aliases_lookup);
1757  	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
1758  		 ap->alias, ap->stem, ap->id, np);
1759  }
1760  
1761  /**
1762   * of_alias_scan - Scan all properties of the 'aliases' node
1763   * @dt_alloc:	An allocator that provides a virtual address to memory
1764   *		for storing the alias_prop entries
1765   *
1766   * The function scans all the properties of the 'aliases' node and populates
1767   * the global alias lookup table with them.  It also resolves the stdout
1768   * path from the /chosen node, if present, so that of_stdout is set up.
1769   */
1770  void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1771  {
1772  	struct property *pp;
1773  
1774  	of_aliases = of_find_node_by_path("/aliases");
1775  	of_chosen = of_find_node_by_path("/chosen");
1776  	if (of_chosen == NULL)
1777  		of_chosen = of_find_node_by_path("/chosen@0");
1778  
1779  	if (of_chosen) {
1780  		/* linux,stdout-path and /aliases/stdout are for legacy compatibility */
1781  		const char *name = NULL;
1782  
1783  		if (of_property_read_string(of_chosen, "stdout-path", &name))
1784  			of_property_read_string(of_chosen, "linux,stdout-path",
1785  						&name);
1786  		if (IS_ENABLED(CONFIG_PPC) && !name)
1787  			of_property_read_string(of_aliases, "stdout", &name);
1788  		if (name)
1789  			of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1790  		if (of_stdout)
1791  			of_stdout->fwnode.flags |= FWNODE_FLAG_BEST_EFFORT;
1792  	}
1793  
1794  	if (!of_aliases)
1795  		return;
1796  
1797  	for_each_property_of_node(of_aliases, pp) {
1798  		const char *start = pp->name;
1799  		const char *end = start + strlen(start);
1800  		struct device_node *np;
1801  		struct alias_prop *ap;
1802  		int id, len;
1803  
1804  		/* Skip properties we do not want to process */
1805  		if (!strcmp(pp->name, "name") ||
1806  		    !strcmp(pp->name, "phandle") ||
1807  		    !strcmp(pp->name, "linux,phandle"))
1808  			continue;
1809  
1810  		np = of_find_node_by_path(pp->value);
1811  		if (!np)
1812  			continue;
1813  
1814  		/* walk the alias backwards to extract the id and work out
1815  		 * the 'stem' string */
1816  		while (end > start && isdigit(*(end-1)))
1817  			end--;
1818  		len = end - start;
1819  
1820  		if (kstrtoint(end, 10, &id) < 0)
1821  			continue;
1822  
1823  		/* Allocate an alias_prop with enough space for the stem */
1824  		ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
1825  		if (!ap)
1826  			continue;
1827  		memset(ap, 0, sizeof(*ap) + len + 1);
1828  		ap->alias = start;
1829  		of_alias_add(ap, np, id, start, len);
1830  	}
1831  }
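
/*
 * Example (illustrative only): given an aliases node such as
 *
 *	aliases {
 *		serial0 = "/soc/serial@12000000";
 *		i2c1 = "/soc/i2c@13000000";
 *	};
 *
 * of_alias_scan() records one alias_prop entry per property, splitting each
 * name into a stem ("serial", "i2c") and a numeric id (0, 1).  The paths
 * above are made up for the sketch.
 */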
1832  
1833  /**
1834   * of_alias_get_id - Get alias id for the given device_node
1835   * @np:		Pointer to the given device_node
1836   * @stem:	Alias stem of the given device_node
1837   *
1838   * The function traverses the lookup table to get the alias id for the given
1839   * device_node and alias stem.
1840   *
1841   * Return: The alias id if found, -ENODEV otherwise.
1842   */
1843  int of_alias_get_id(struct device_node *np, const char *stem)
1844  {
1845  	struct alias_prop *app;
1846  	int id = -ENODEV;
1847  
1848  	mutex_lock(&of_mutex);
1849  	list_for_each_entry(app, &aliases_lookup, link) {
1850  		if (strcmp(app->stem, stem) != 0)
1851  			continue;
1852  
1853  		if (np == app->np) {
1854  			id = app->id;
1855  			break;
1856  		}
1857  	}
1858  	mutex_unlock(&of_mutex);
1859  
1860  	return id;
1861  }
1862  EXPORT_SYMBOL_GPL(of_alias_get_id);
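
/*
 * Example (illustrative only): a serial driver probe could use the alias id
 * to pick a stable line number for its port.
 *
 *	int id = of_alias_get_id(pdev->dev.of_node, "serial");
 *
 *	if (id < 0)
 *		id = 0;
 *
 * where id 0 is a driver-chosen fallback when no alias exists.
 */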
1863  
1864  /**
1865   * of_alias_get_highest_id - Get highest alias id for the given stem
1866   * @stem:	Alias stem to be examined
1867   *
1868   * The function traverses the lookup table to get the highest alias id for
1869   * the given alias stem.  It returns that id if found, -ENODEV otherwise.
1870   */
1871  int of_alias_get_highest_id(const char *stem)
1872  {
1873  	struct alias_prop *app;
1874  	int id = -ENODEV;
1875  
1876  	mutex_lock(&of_mutex);
1877  	list_for_each_entry(app, &aliases_lookup, link) {
1878  		if (strcmp(app->stem, stem) != 0)
1879  			continue;
1880  
1881  		if (app->id > id)
1882  			id = app->id;
1883  	}
1884  	mutex_unlock(&of_mutex);
1885  
1886  	return id;
1887  }
1888  EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
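
/*
 * Example (illustrative only): reserving dynamically assigned ids above the
 * fixed aliases, as an RTC- or I2C-style subsystem might do.  "my_ida" is a
 * hypothetical allocator.
 *
 *	int floor = of_alias_get_highest_id("rtc");
 *
 *	if (floor >= 0)
 *		id = ida_alloc_range(&my_ida, floor + 1, INT_MAX, GFP_KERNEL);
 *	else
 *		id = ida_alloc(&my_ida, GFP_KERNEL);
 */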
1889  
1890  /**
1891   * of_console_check() - Test and setup console for DT setup
1892   * @dn: Pointer to device node
1893   * @name: Name to use for preferred console without index, e.g. "ttyS"
1894   * @index: Index to use for preferred console.
1895   *
1896   * Check if the given device node matches the stdout-path property in the
1897   * /chosen node. If it does then register it as the preferred console.
1898   *
1899   * Return: true if the console was successfully set up, false otherwise.
1900   */
1901  bool of_console_check(struct device_node *dn, char *name, int index)
1902  {
1903  	if (!dn || dn != of_stdout || console_set_on_cmdline)
1904  		return false;
1905  
1906  	/*
1907  	 * XXX: cast `options' to a char pointer to suppress compilation
1908  	 * warnings: printk, UART and console drivers expect a char pointer.
1909  	 */
1910  	return !add_preferred_console(name, index, (char *)of_stdout_options);
1911  }
1912  EXPORT_SYMBOL_GPL(of_console_check);
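
/*
 * Example (illustrative only): a UART driver (or the serial core on its
 * behalf) can let the DT stdout-path select the preferred console when a
 * port is registered; "port" is a hypothetical struct uart_port.
 *
 *	if (of_console_check(port->dev->of_node, "ttyS", port->line))
 *		dev_dbg(port->dev, "selected as preferred console\n");
 */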
1913  
1914  /**
1915   * of_find_next_cache_node - Find a node's subsidiary cache
1916   * @np:	node of type "cpu" or "cache"
1917   *
1918   * Return: A node pointer with refcount incremented, use
1919   * of_node_put() on it when done.  Caller should hold a reference
1920   * to np.
1921   */
1922  struct device_node *of_find_next_cache_node(const struct device_node *np)
1923  {
1924  	struct device_node *child, *cache_node;
1925  
1926  	cache_node = of_parse_phandle(np, "l2-cache", 0);
1927  	if (!cache_node)
1928  		cache_node = of_parse_phandle(np, "next-level-cache", 0);
1929  
1930  	if (cache_node)
1931  		return cache_node;
1932  
1933  	/* OF on pmac has nodes instead of properties named "l2-cache"
1934  	 * beneath CPU nodes.
1935  	 */
1936  	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
1937  		for_each_child_of_node(np, child)
1938  			if (of_node_is_type(child, "cache"))
1939  				return child;
1940  
1941  	return NULL;
1942  }
1943  
1944  /**
1945   * of_find_last_cache_level - Find the level at which the last cache is
1946   * 		present for the given logical cpu
1947   *
1948   * @cpu: cpu number (logical index) for which the last cache level is needed
1949   *
1950   * Return: The level at which the last cache is present. It is exactly the
1951   * same as the total number of cache levels for the given logical cpu.
1952   */
1953  int of_find_last_cache_level(unsigned int cpu)
1954  {
1955  	u32 cache_level = 0;
1956  	struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
1957  
1958  	while (np) {
1959  		of_node_put(prev);
1960  		prev = np;
1961  		np = of_find_next_cache_node(np);
1962  	}
1963  
1964  	of_property_read_u32(prev, "cache-level", &cache_level);
1965  	of_node_put(prev);
1966  
1967  	return cache_level;
1968  }
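
/*
 * Example (illustrative only): architecture or cacheinfo code could use the
 * result to size per-cpu cache level arrays before parsing each level.
 *
 *	unsigned int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		pr_debug("cpu%u: %d cache level(s)\n", cpu,
 *			 of_find_last_cache_level(cpu));
 */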
1969  
1970  /**
1971   * of_map_id - Translate an ID through a downstream mapping.
1972   * @np: root complex device node.
1973   * @id: device ID to map.
1974   * @map_name: property name of the map to use.
1975   * @map_mask_name: optional property name of the mask to use.
1976   * @target: optional pointer to a target device node.
1977   * @id_out: optional pointer to receive the translated ID.
1978   *
1979   * Given a device ID, look up the appropriate implementation-defined
1980   * platform ID and/or the target device which receives transactions on that
1981   * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
1982   * @id_out may be NULL if only the other is required. If @target points to
1983   * a non-NULL device node pointer, only entries targeting that node will be
1984   * matched; if it points to a NULL value, it will receive the device node of
1985   * the first matching target phandle, with a reference held.
1986   *
1987   * Return: 0 on success or a standard error code on failure.
1988   */
1989  int of_map_id(struct device_node *np, u32 id,
1990  	       const char *map_name, const char *map_mask_name,
1991  	       struct device_node **target, u32 *id_out)
1992  {
1993  	u32 map_mask, masked_id;
1994  	int map_len;
1995  	const __be32 *map = NULL;
1996  
1997  	if (!np || !map_name || (!target && !id_out))
1998  		return -EINVAL;
1999  
2000  	map = of_get_property(np, map_name, &map_len);
2001  	if (!map) {
2002  		if (target)
2003  			return -ENODEV;
2004  		/* Otherwise, no map implies no translation */
2005  		*id_out = id;
2006  		return 0;
2007  	}
2008  
2009  	if (!map_len || map_len % (4 * sizeof(*map))) {
2010  		pr_err("%pOF: Error: Bad %s length: %d\n", np,
2011  			map_name, map_len);
2012  		return -EINVAL;
2013  	}
2014  
2015  	/* The default is to select all bits. */
2016  	map_mask = 0xffffffff;
2017  
2018  	/*
2019  	 * Can be overridden by "{iommu,msi}-map-mask" property.
2020  	 * If of_property_read_u32() fails, the default is used.
2021  	 */
2022  	if (map_mask_name)
2023  		of_property_read_u32(np, map_mask_name, &map_mask);
2024  
2025  	masked_id = map_mask & id;
2026  	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
2027  		struct device_node *phandle_node;
2028  		u32 id_base = be32_to_cpup(map + 0);
2029  		u32 phandle = be32_to_cpup(map + 1);
2030  		u32 out_base = be32_to_cpup(map + 2);
2031  		u32 id_len = be32_to_cpup(map + 3);
2032  
2033  		if (id_base & ~map_mask) {
2034  			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
2035  				np, map_name, map_name,
2036  				map_mask, id_base);
2037  			return -EFAULT;
2038  		}
2039  
2040  		if (masked_id < id_base || masked_id >= id_base + id_len)
2041  			continue;
2042  
2043  		phandle_node = of_find_node_by_phandle(phandle);
2044  		if (!phandle_node)
2045  			return -ENODEV;
2046  
2047  		if (target) {
2048  			if (*target)
2049  				of_node_put(phandle_node);
2050  			else
2051  				*target = phandle_node;
2052  
2053  			if (*target != phandle_node)
2054  				continue;
2055  		}
2056  
2057  		if (id_out)
2058  			*id_out = masked_id - id_base + out_base;
2059  
2060  		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
2061  			np, map_name, map_mask, id_base, out_base,
2062  			id_len, id, masked_id - id_base + out_base);
2063  		return 0;
2064  	}
2065  
2066  	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
2067  		id, target && *target ? *target : NULL);
2068  
2069  	/* Bypasses translation */
2070  	if (id_out)
2071  		*id_out = id;
2072  	return 0;
2073  }
2074  EXPORT_SYMBOL_GPL(of_map_id);
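
/*
 * Example (illustrative only): translating a PCI requester ID through an
 * "msi-map" to find the MSI controller node and stream ID, roughly what the
 * PCI/MSI layer does.  The requester ID value is made up for the sketch.
 *
 *	struct device_node *msi_np = NULL;
 *	u32 rid = 0x0010, sid;
 *
 *	if (!of_map_id(bridge_np, rid, "msi-map", "msi-map-mask",
 *		       &msi_np, &sid)) {
 *		pr_debug("RID 0x%x -> sid 0x%x on %pOF\n", rid, sid, msi_np);
 *		of_node_put(msi_np);
 *	}
 */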
2075