// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/pseudo_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/resource_ext.h>
#include <uapi/linux/magic.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

static DEFINE_RWLOCK(resource_lock);

static struct resource *next_resource(struct resource *p, bool skip_children)
{
	if (!skip_children && p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#define for_each_resource(_root, _p, _skip_children) \
	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))

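/*
 * Example (illustrative sketch, compiled out): walking the whole iomem
 * tree, children included, requires holding resource_lock for reading.
 */
#if 0
static void dump_iomem_tree(void)
{
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false)
		pr_info("%pR (%s)\n", p, p->name ? p->name : "<unnamed>");
	read_unlock(&resource_lock);
}
#endif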
#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *p;
	loff_t l = *pos;

	read_lock(&resource_lock);
	for_each_resource(root, p, false) {
		if (l-- == 0)
			break;
	}

	return p;
}

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;

	(*pos)++;

	return (void *)next_resource(p, false);
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = pde_data(file_inode(m->file));
	struct resource *r = v, *p;
	unsigned long long start, end;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;

	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
		start = r->start;
		end = r->end;
	} else {
		start = end = 0;
	}

	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, start,
			width, end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int __init ioresources_init(void)
{
	proc_create_seq_data("ioports", 0, NULL, &resource_op,
			&ioport_resource);
	proc_create_seq_data("iomem", 0, NULL, &resource_op, &iomem_resource);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */

static void free_resource(struct resource *res)
{
	/*
	 * If the resource was allocated using memblock early during boot
	 * we'll leak it here: we can only return full pages back to the
	 * buddy and trying to be smart and reusing them eventually in
	 * alloc_resource() overcomplicates resource handling.
	 */
	if (res && PageSlab(virt_to_head_page(res)))
		kfree(res);
}

static struct resource *alloc_resource(gfp_t flags)
{
	return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old, bool release_child)
{
	struct resource *tmp, **p, *chd;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			if (release_child || !(tmp->child)) {
				*p = tmp->sibling;
			} else {
				for (chd = tmp->child;; chd = chd->sibling) {
					chd->parent = tmp->parent;
					if (!(chd->sibling))
						break;
				}
				*p = tmp->child;
				chd->sibling = tmp->sibling;
			}
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
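
/*
 * Example (illustrative sketch, compiled out): a driver claiming a fixed
 * MMIO window under iomem_resource. The name and address range are
 * hypothetical.
 */
#if 0
static struct resource example_mmio = {
	.name	= "example-device",
	.start	= 0xfed40000,
	.end	= 0xfed40fff,
	.flags	= IORESOURCE_MEM,
};

static int example_claim(void)
{
	int ret = request_resource(&iomem_resource, &example_mmio);

	if (ret)	/* -EBUSY: range already claimed by someone else */
		return ret;
	/* ... use the region, then on teardown: */
	release_resource(&example_mmio);
	return 0;
}
#endif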

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, true);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

/**
 * find_next_iomem_res - Finds the lowest iomem resource that covers part of
 *			 [@start..@end].
 *
 * If a resource is found, returns 0 and @*res is overwritten with the part
 * of the resource that's within [@start..@end]; if none is found, returns
 * -ENODEV.  Returns -EINVAL for invalid parameters.
 *
 * @start:	start address of the resource searched for
 * @end:	end address of same resource
 * @flags:	flags which the resource must have
 * @desc:	descriptor the resource must have
 * @res:	return ptr, if resource found
 *
 * The caller must specify @start, @end, @flags, and @desc
 * (which may be IORES_DESC_NONE).
 */
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
			       unsigned long flags, unsigned long desc,
			       struct resource *res)
{
	struct resource *p;

	if (!res)
		return -EINVAL;

	if (start >= end)
		return -EINVAL;

	read_lock(&resource_lock);

	for_each_resource(&iomem_resource, p, false) {
		/* If we passed the resource we are looking for, stop */
		if (p->start > end) {
			p = NULL;
			break;
		}

		/* Skip until we find a range that matches what we look for */
		if (p->end < start)
			continue;

		if ((p->flags & flags) != flags)
			continue;
		if ((desc != IORES_DESC_NONE) && (desc != p->desc))
			continue;

		/* Found a match, break */
		break;
	}

	if (p) {
		/* copy data */
		*res = (struct resource) {
			.start = max(start, p->start),
			.end = min(end, p->end),
			.flags = p->flags,
			.desc = p->desc,
			.parent = p->parent,
		};
	}

	read_unlock(&resource_lock);
	return p ? 0 : -ENODEV;
}

static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
				 unsigned long flags, unsigned long desc,
				 void *arg,
				 int (*func)(struct resource *, void *))
{
	struct resource res;
	int ret = -EINVAL;

	while (start < end &&
	       !find_next_iomem_res(start, end, flags, desc, &res)) {
		ret = (*func)(&res, arg);
		if (ret)
			break;

		start = res.end + 1;
	}

	return ret;
}

/**
 * walk_iomem_res_desc - Walks through iomem resources and calls func()
 *			 with matching resource ranges.
 *
 * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
 * @flags: I/O resource flags
 * @start: start addr
 * @end: end addr
 * @arg: function argument for the callback @func
 * @func: callback function that is called for each qualifying resource area
 *
 * All the memory ranges which overlap start,end and also match flags and
 * desc are valid candidates.
 *
 * NOTE: For a new descriptor search, define a new IORES_DESC in
 * <linux/ioport.h> and set it in 'desc' of a target resource entry.
 */
int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
		u64 end, void *arg, int (*func)(struct resource *, void *))
{
	return __walk_iomem_res_desc(start, end, flags, desc, arg, func);
}
EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
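
/*
 * Example (illustrative sketch, compiled out): counting ACPI table regions
 * with walk_iomem_res_desc(). The callback convention is "return non-zero
 * to stop the walk".
 */
#if 0
static int example_count_cb(struct resource *res, void *arg)
{
	unsigned int *count = arg;

	(*count)++;
	return 0;	/* keep walking */
}

static unsigned int example_count_acpi_tables(void)
{
	unsigned int count = 0;

	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, IORESOURCE_MEM,
			    0, -1, &count, example_count_cb);
	return count;
}
#endif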

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * This function is only for System RAM: it deals with full ranges and not
 * PFNs. If resources are not PFN-aligned, dealing with PFNs can truncate
 * ranges.
 */
int walk_system_ram_res(u64 start, u64 end, void *arg,
			int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function, being a variant of walk_system_ram_res(), calls the @func
 * callback against all memory ranges of type System RAM which are marked as
 * IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY in reversed order, i.e., from
 * higher to lower.
 */
int walk_system_ram_res_rev(u64 start, u64 end, void *arg,
				int (*func)(struct resource *, void *))
{
	struct resource res, *rams;
	int rams_size = 16, i;
	unsigned long flags;
	int ret = -1;

	/* create a list */
	rams = kvcalloc(rams_size, sizeof(struct resource), GFP_KERNEL);
	if (!rams)
		return ret;

	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	i = 0;
	while ((start < end) &&
		(!find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res))) {
		if (i >= rams_size) {
			/* re-alloc */
			struct resource *rams_new;

			rams_new = kvrealloc(rams, (rams_size + 16) * sizeof(struct resource),
					     GFP_KERNEL);
			if (!rams_new)
				goto out;

			rams = rams_new;
			rams_size += 16;
		}

		rams[i++] = res;
		start = res.end + 1;
	}

	/* go reverse */
	for (i--; i >= 0; i--) {
		ret = (*func)(&rams[i], arg);
		if (ret)
			break;
	}

out:
	kvfree(rams);
	return ret;
}

/*
 * This function calls the @func callback against all memory ranges, which
 * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 */
int walk_mem_res(u64 start, u64 end, void *arg,
		 int (*func)(struct resource *, void *))
{
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, arg,
				     func);
}

/*
 * This function calls the @func callback against all memory ranges of type
 * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
 * It is to be used only for System RAM.
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	resource_size_t start, end;
	unsigned long flags;
	struct resource res;
	unsigned long pfn, end_pfn;
	int ret = -EINVAL;

	start = (u64) start_pfn << PAGE_SHIFT;
	end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	while (start < end &&
	       !find_next_iomem_res(start, end, flags, IORES_DESC_NONE, &res)) {
		pfn = PFN_UP(res.start);
		end_pfn = PFN_DOWN(res.end + 1);
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		start = res.end + 1;
	}
	return ret;
}
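
/*
 * Example (illustrative sketch, compiled out): totalling the System RAM
 * pages within a PFN range via walk_system_ram_range().
 */
#if 0
static int example_sum_pages_cb(unsigned long pfn, unsigned long nr_pages,
				void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;	/* keep walking */
}

static unsigned long example_ram_pages(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total,
			      example_sum_pages_cb);
	return total;
}
#endif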

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}

/*
 * This generic page_is_ram() returns true if the specified address is
 * registered as System RAM in the iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}
EXPORT_SYMBOL_GPL(page_is_ram);

static int __region_intersects(struct resource *parent, resource_size_t start,
			       size_t size, unsigned long flags,
			       unsigned long desc)
{
	resource_size_t ostart, oend;
	int type = 0; int other = 0;
	struct resource *p, *dp;
	bool is_type, covered;
	struct resource res;

	res.start = start;
	res.end = start + size - 1;

	for (p = parent->child; p; p = p->sibling) {
		if (!resource_overlaps(p, &res))
			continue;
		is_type = (p->flags & flags) == flags &&
			(desc == IORES_DESC_NONE || desc == p->desc);
		if (is_type) {
			type++;
			continue;
		}
		/*
		 * Continue to search in descendant resources, since the
		 * matched descendant resources may cover some ranges of 'p'.
		 *
		 * |------------- "CXL Window 0" ------------|
		 * |-- "System RAM" --|
		 *
		 * behaves like the following fake resource tree when
		 * searching for "System RAM".
		 *
		 * |-- "System RAM" --||-- "CXL Window 0a" --|
		 */
		covered = false;
		ostart = max(res.start, p->start);
		oend = min(res.end, p->end);
		for_each_resource(p, dp, false) {
			if (!resource_overlaps(dp, &res))
				continue;
			is_type = (dp->flags & flags) == flags &&
				(desc == IORES_DESC_NONE || desc == dp->desc);
			if (is_type) {
				type++;
				/*
				 * Range from 'ostart' to 'dp->start'
				 * isn't covered by matched resource.
				 */
				if (dp->start > ostart)
					break;
				if (dp->end >= oend) {
					covered = true;
					break;
				}
				/* Remove covered range */
				ostart = max(ostart, dp->end + 1);
			}
		}
		if (!covered)
			other++;
	}

	if (type == 0)
		return REGION_DISJOINT;

	if (other == 0)
		return REGION_INTERSECTS;

	return REGION_MIXED;
}

/**
 * region_intersects() - determine intersection of region with known resources
 * @start: region start address
 * @size: size of region
 * @flags: flags of resource (in iomem_resource)
 * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
 *
 * Check if the specified region partially overlaps or fully eclipses a
 * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
 * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
 * return REGION_MIXED if the region overlaps @flags/@desc and another
 * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
 * and no other defined resource. Note that REGION_INTERSECTS is also
 * returned in the case when the specified region overlaps RAM and undefined
 * memory holes.
 *
 * region_intersects() is used by memory remapping functions to ensure
 * the user is not remapping RAM and is a vast speed up over walking
 * through the resource table page by page.
 */
int region_intersects(resource_size_t start, size_t size, unsigned long flags,
		      unsigned long desc)
{
	int ret;

	read_lock(&resource_lock);
	ret = __region_intersects(&iomem_resource, start, size, flags, desc);
	read_unlock(&resource_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(region_intersects);
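
/*
 * Example (illustrative sketch, compiled out): refusing to remap a physical
 * range that overlaps System RAM, the typical use of region_intersects()
 * in remapping code.
 */
#if 0
static void __iomem *example_map_device(resource_size_t phys, size_t size)
{
	if (region_intersects(phys, size, IORESOURCE_SYSTEM_RAM,
			      IORES_DESC_NONE) != REGION_DISJOINT)
		return NULL;	/* overlaps RAM, fully or partially */

	return ioremap(phys, size);
}
#endif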

void __weak arch_remove_reservations(struct resource *avail)
{
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

/*
 * Find empty space in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource_space(struct resource *root, struct resource *old,
				 struct resource *new, resource_size_t size,
				 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;
	resource_alignf alignf = constraint->alignf;

	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for (;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		avail.flags = new->flags & ~IORESOURCE_UNSET;
		if (avail.start >= tmp.start) {
			alloc.flags = avail.flags;
			if (alignf) {
				alloc.start = alignf(constraint->alignf_data,
						     &avail, size, constraint->align);
			} else {
				alloc.start = avail.start;
			}
			alloc.end = alloc.start + size - 1;
			if (alloc.start <= alloc.end &&
			    resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/**
 * find_resource_space - Find empty space in the resource tree
 * @root:	Root resource descriptor
 * @new:	Resource descriptor awaiting an empty resource space
 * @size:	The minimum size of the empty space
 * @constraint:	The range and alignment constraints to be met
 *
 * Finds an empty space under @root in the resource tree satisfying range and
 * alignment @constraints.
 *
 * Return:
 * * %0		- if successful, @new members start, end, and flags are altered.
 * * %-EBUSY	- if no empty space was found.
 */
int find_resource_space(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource_space(root, NULL, new, size, constraint);
}
EXPORT_SYMBOL_GPL(find_resource_space);
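
/*
 * Example (illustrative sketch, compiled out): asking for a 64KiB,
 * 64KiB-aligned hole inside a caller-supplied window resource. The window
 * and the resource name are hypothetical.
 */
#if 0
static int example_find_space(struct resource *window)
{
	struct resource_constraint constraint = {
		.min	= 0,
		.max	= 0xffffffff,
		.align	= 0x10000,	/* 64KiB */
	};
	struct resource new = {
		.name	= "example hole",
		.flags	= IORESOURCE_MEM,
	};
	int ret;

	ret = find_resource_space(window, &new, 0x10000, &constraint);
	if (!ret)
		pr_info("found space %pR\n", &new);
	return ret;	/* -EBUSY if nothing satisfies the constraints */
}
#endif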

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be reallocated in the
 *	current location.
 *
 * @root: root resource descriptor
 * @old:  resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
static int reallocate_resource(struct resource *root, struct resource *old,
			       resource_size_t newsize,
			       struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource_space(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old, true);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}

/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already allocated
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_alignf alignf,
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/*
		 * resource is already allocated, try reallocating with
		 * the new constraints
		 */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource_space(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
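
/*
 * Example (illustrative sketch, compiled out): letting the core pick a
 * 4KiB-aligned slot in 32-bit I/O memory instead of hardcoding an address.
 * The range, size and name are hypothetical.
 */
#if 0
static struct resource example_buf = {
	.name	= "example buffer",
	.flags	= IORESOURCE_MEM,
};

static int example_alloc(void)
{
	return allocate_resource(&iomem_resource, &example_buf,
				 0x1000,	/* size */
				 0x10000000,	/* min */
				 0xffffffff,	/* max */
				 0x1000,	/* align */
				 NULL, NULL);	/* no custom alignf */
}
#endif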

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, conflicting resource if the resource can't be
 * inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 *
 * This function is intended for producers of resources, such as FW modules
 * and bus drivers.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
EXPORT_SYMBOL_GPL(insert_resource);
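
/*
 * Example (illustrative sketch, compiled out): a firmware or bus driver
 * publishing a host bridge window; pre-existing resources that fit entirely
 * inside it become children of the new node. The range is hypothetical.
 */
#if 0
static struct resource example_window = {
	.name	= "example host bridge window",
	.start	= 0xc0000000,
	.end	= 0xcfffffff,
	.flags	= IORESOURCE_MEM,
};

static int example_publish(void)
{
	return insert_resource(&iomem_resource, &example_window);
}
#endif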

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		pr_info("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}
/*
 * Not for general consumption, only early boot memory map parsing, PCI
 * resource discovery, and late discovery of CXL resources are expected
 * to use this interface. The former are built-in and only the latter,
 * CXL, is a module.
 */
EXPORT_SYMBOL_NS_GPL(insert_resource_expand_to_fit, CXL);

/**
 * remove_resource - Remove a resource in the resource tree
 * @old: resource to remove
 *
 * Returns 0 on success, -EINVAL if the resource is not valid.
 *
 * This function removes a resource previously inserted by insert_resource()
 * or insert_resource_conflict(), and moves the children (if any) up to
 * where they were before.  insert_resource() and insert_resource_conflict()
 * insert a new resource, and move any conflicting resources down to the
 * children of the new resource.
 *
 * insert_resource(), insert_resource_conflict() and remove_resource() are
 * intended for producers of resources, such as FW modules and bus drivers.
 */
int remove_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old, false);
	write_unlock(&resource_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(remove_resource);

static int __adjust_resource(struct resource *res, resource_size_t start,
				resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	if (!parent)
		goto skip;

	if ((start < parent->start) || (end > parent->end))
		goto out;

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

skip:
	for (tmp = res->child; tmp; tmp = tmp->sibling)
		if ((tmp->start < start) || (tmp->end > end))
			goto out;

	res->start = start;
	res->end = end;
	result = 0;

out:
	return result;
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start,
		    resource_size_t size)
{
	int result;

	write_lock(&resource_lock);
	result = __adjust_resource(res, start, size);
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
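
/*
 * Example (illustrative sketch, compiled out): growing a previously
 * requested region in place; this fails with -EBUSY if a sibling is in the
 * way or existing children would fall outside the new bounds.
 */
#if 0
static int example_grow(struct resource *res, resource_size_t extra)
{
	return adjust_resource(res, res->start, resource_size(res) + extra);
}
#endif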

static void __init
__reserve_region_with_split(struct resource *root, resource_size_t start,
			    resource_size_t end, const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = alloc_resource(GFP_ATOMIC);
	struct resource *next_res = NULL;
	int type = resource_type(root);

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = type | IORESOURCE_BUSY;
	res->desc = IORES_DESC_NONE;

	while (1) {
		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			free_resource(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = alloc_resource(GFP_ATOMIC);
				if (!next_res) {
					free_resource(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = type | IORESOURCE_BUSY;
				next_res->desc = IORES_DESC_NONE;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}
}

void __init
reserve_region_with_split(struct resource *root, resource_size_t start,
			  resource_size_t end, const char *name)
{
	int abort = 0;

	write_lock(&resource_lock);
	if (root->start > start || root->end < end) {
		pr_err("requested range [0x%llx-0x%llx] not in root %pr\n",
		       (unsigned long long)start, (unsigned long long)end,
		       root);
		if (start > root->end || end < root->start)
			abort = 1;
		else {
			if (end > root->end)
				end = root->end;
			if (start < root->start)
				start = root->start;
			pr_err("fixing request to [0x%llx-0x%llx]\n",
			       (unsigned long long)start,
			       (unsigned long long)end);
		}
		dump_stack();
	}
	if (!abort)
		__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

static struct inode *iomem_inode;

#ifdef CONFIG_IO_STRICT_DEVMEM
static void revoke_iomem(struct resource *res)
{
	/* pairs with smp_store_release() in iomem_init_inode() */
	struct inode *inode = smp_load_acquire(&iomem_inode);

	/*
	 * Check that the initialization has completed. Losing the race
	 * is ok because it means drivers are claiming resources before
	 * the fs_initcall level of init, which prevents iomem_get_mapping
	 * users from establishing mappings.
	 */
	if (!inode)
		return;

	/*
	 * The expectation is that the driver has successfully marked
	 * the resource busy by this point, so devmem_is_allowed()
	 * should start returning false, however for performance this
	 * does not iterate the entire resource range.
	 */
	if (devmem_is_allowed(PHYS_PFN(res->start)) &&
	    devmem_is_allowed(PHYS_PFN(res->end))) {
		/*
		 * *cringe* iomem=relaxed says "go ahead, what's the
		 * worst that can happen?"
		 */
		return;
	}

	unmap_mapping_range(inode->i_mapping, res->start, resource_size(res), 1);
}
#else
static void revoke_iomem(struct resource *res) {}
#endif

struct address_space *iomem_get_mapping(void)
{
	/*
	 * This function is only called from file open paths, hence guaranteed
	 * that fs_initcalls have completed and no need to check for NULL. But
	 * since revoke_iomem can be called before the initcall we still need
	 * the barrier to appease checkers.
	 */
	return smp_load_acquire(&iomem_inode)->i_mapping;
}

static int __request_region_locked(struct resource *res, struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	res->name = name;
	res->start = start;
	res->end = start + n - 1;

	for (;;) {
		struct resource *conflict;

		res->flags = resource_type(parent) | resource_ext_type(parent);
		res->flags |= IORESOURCE_BUSY | flags;
		res->desc = parent->desc;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		/*
		 * mm/hmm.c reserves physical addresses which then
		 * become unavailable to other users.  Conflicts are
		 * not expected.  Warn to aid debugging if encountered.
		 */
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_warn("Unaddressable device %s %pR conflicts with %pR\n",
				conflict->name, conflict, res);
		}
		if (conflict != parent) {
			if (!(conflict->flags & IORESOURCE_BUSY)) {
				parent = conflict;
				continue;
			}
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		return -EBUSY;
	}

	return 0;
}

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource *__request_region(struct resource *parent,
				  resource_size_t start, resource_size_t n,
				  const char *name, int flags)
{
	struct resource *res = alloc_resource(GFP_KERNEL);
	int ret;

	if (!res)
		return NULL;

	write_lock(&resource_lock);
	ret = __request_region_locked(res, parent, start, n, name, flags);
	write_unlock(&resource_lock);

	if (ret) {
		free_resource(res);
		return NULL;
	}

	if (parent == &iomem_resource)
		revoke_iomem(res);

	return res;
}
EXPORT_SYMBOL(__request_region);
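
/*
 * Example (illustrative sketch, compiled out): drivers normally use the
 * request_region()/request_mem_region() wrappers from <linux/ioport.h>
 * rather than calling __request_region() directly. The address range and
 * name are hypothetical.
 */
#if 0
static int example_probe(void)
{
	if (!request_mem_region(0xfed40000, 0x1000, "example-device"))
		return -EBUSY;	/* someone else owns (part of) the range */

	/* ... ioremap() and use the device ... */

	release_mem_region(0xfed40000, 0x1000);
	return 0;
}
#endif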

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
		      resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			free_resource(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	pr_warn("Trying to free nonexistent resource <%pa-%pa>\n", &start, &end);
}
EXPORT_SYMBOL(__release_region);

#ifdef CONFIG_MEMORY_HOTREMOVE
/**
 * release_mem_region_adjustable - release a previously reserved memory region
 * @start: resource start address
 * @size: resource region size
 *
 * This interface is intended for memory hot-delete.  The requested region
 * is released from a currently busy memory resource.  The requested region
 * must either match exactly or fit into a single busy resource entry.  In
 * the latter case, the remaining resource is adjusted accordingly.
 * Existing children of the busy memory resource must be immutable in the
 * request.
 *
 * Note:
 * - Additional release conditions, such as overlapping region, can be
 *   supported after they are confirmed as valid cases.
 * - When a busy memory resource gets split into two entries, the code
 *   assumes that all children remain in the lower address entry for
 *   simplicity.  Enhance this logic when necessary.
 */
void release_mem_region_adjustable(resource_size_t start, resource_size_t size)
{
	struct resource *parent = &iomem_resource;
	struct resource *new_res = NULL;
	bool alloc_nofail = false;
	struct resource **p;
	struct resource *res;
	resource_size_t end;

	end = start + size - 1;
	if (WARN_ON_ONCE((start < parent->start) || (end > parent->end)))
		return;

	/*
	 * We free up quite a lot of memory on memory hotunplug (esp. the
	 * memmap) just before releasing the region. This allocation is
	 * highly unlikely to fail - let's play safe and make it never fail,
	 * as the caller cannot perform any error handling (e.g., trying to
	 * re-add memory will fail similarly).
	 */
retry:
	new_res = alloc_resource(GFP_KERNEL | (alloc_nofail ? __GFP_NOFAIL : 0));

	p = &parent->child;
	write_lock(&resource_lock);

	while ((res = *p)) {
		if (res->start >= end)
			break;

		/* look for the next resource if the range does not fit into this one */
		if (res->start > start || res->end < end) {
			p = &res->sibling;
			continue;
		}

		if (!(res->flags & IORESOURCE_MEM))
			break;

		if (!(res->flags & IORESOURCE_BUSY)) {
			p = &res->child;
			continue;
		}

		/* found the target resource; let's adjust accordingly */
		if (res->start == start && res->end == end) {
			/* free the whole entry */
			*p = res->sibling;
			free_resource(res);
		} else if (res->start == start && res->end != end) {
			/* adjust the start */
			WARN_ON_ONCE(__adjust_resource(res, end + 1,
						       res->end - end));
		} else if (res->start != start && res->end == end) {
			/* adjust the end */
			WARN_ON_ONCE(__adjust_resource(res, res->start,
						       start - res->start));
		} else {
			/* split into two entries - we need a new resource */
			if (!new_res) {
				new_res = alloc_resource(GFP_ATOMIC);
				if (!new_res) {
					alloc_nofail = true;
					write_unlock(&resource_lock);
					goto retry;
				}
			}
			new_res->name = res->name;
			new_res->start = end + 1;
			new_res->end = res->end;
			new_res->flags = res->flags;
			new_res->desc = res->desc;
			new_res->parent = res->parent;
			new_res->sibling = res->sibling;
			new_res->child = NULL;

			if (WARN_ON_ONCE(__adjust_resource(res, res->start,
							   start - res->start)))
				break;
			res->sibling = new_res;
			new_res = NULL;
		}

		break;
	}

	write_unlock(&resource_lock);
	free_resource(new_res);
}
#endif	/* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_MEMORY_HOTPLUG
static bool system_ram_resources_mergeable(struct resource *r1,
					   struct resource *r2)
{
	/* We assume either r1 or r2 is IORESOURCE_SYSRAM_MERGEABLE. */
	return r1->flags == r2->flags && r1->end + 1 == r2->start &&
	       r1->name == r2->name && r1->desc == r2->desc &&
	       !r1->child && !r2->child;
}

/**
 * merge_system_ram_resource - mark the System RAM resource mergeable and try to
 *	merge it with adjacent, mergeable resources
 * @res: resource descriptor
 *
 * This interface is intended for memory hotplug, whereby lots of contiguous
 * system ram resources are added (e.g., via add_memory*()) by a driver, and
 * the actual resource boundaries are not of interest (e.g., it might be
 * relevant for DIMMs). Only resources that are marked mergeable, that have the
 * same parent, and that don't have any children are considered. All mergeable
 * resources must be immutable during the request.
 *
 * Note:
 * - The caller has to make sure that no pointers to resources that are
 *   marked mergeable are used anymore after this call - the resource might
 *   be freed and the pointer might be stale!
 * - release_mem_region_adjustable() will split on demand on memory hotunplug
 */
void merge_system_ram_resource(struct resource *res)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	struct resource *cur;

	if (WARN_ON_ONCE((res->flags & flags) != flags))
		return;

	write_lock(&resource_lock);
	res->flags |= IORESOURCE_SYSRAM_MERGEABLE;

	/* Try to merge with next item in the list. */
	cur = res->sibling;
	if (cur && system_ram_resources_mergeable(res, cur)) {
		res->end = cur->end;
		res->sibling = cur->sibling;
		free_resource(cur);
	}

	/* Try to merge with previous item in the list. */
	cur = res->parent->child;
	while (cur && cur->sibling != res)
		cur = cur->sibling;
	if (cur && system_ram_resources_mergeable(cur, res)) {
		cur->end = res->end;
		cur->sibling = res->sibling;
		free_resource(res);
	}
	write_unlock(&resource_lock);
}
#endif	/* CONFIG_MEMORY_HOTPLUG */

/*
 * Managed region resource
 */
static void devm_resource_release(struct device *dev, void *ptr)
{
	struct resource **r = ptr;

	release_resource(*r);
}

/**
 * devm_request_resource() - request and reserve an I/O or memory resource
 * @dev: device for which to request the resource
 * @root: root of the resource tree from which to request the resource
 * @new: descriptor of the resource to request
 *
 * This is a device-managed version of request_resource(). There is usually
 * no need to release resources requested by this function explicitly since
 * that will be taken care of when the device is unbound from its driver.
 * If for some reason the resource needs to be released explicitly, because
 * of ordering issues for example, drivers must call devm_release_resource()
 * rather than the regular release_resource().
 *
 * When a conflict is detected between any existing resources and the newly
 * requested resource, an error message will be printed.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_request_resource(struct device *dev, struct resource *root,
			  struct resource *new)
{
	struct resource *conflict, **ptr;

	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = new;

	conflict = request_resource_conflict(root, new);
	if (conflict) {
		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
			new, conflict->name, conflict);
		devres_free(ptr);
		return -EBUSY;
	}

	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL(devm_request_resource);
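
/*
 * Example (illustrative sketch, compiled out): the devm_ variant ties the
 * reservation's lifetime to driver binding, so the usual remove path needs
 * no explicit release.
 */
#if 0
static int example_devm_probe(struct device *dev, struct resource *res)
{
	int ret = devm_request_resource(dev, &iomem_resource, res);

	if (ret)
		return ret;	/* conflict was already logged via dev_err() */

	/* the resource is released automatically on driver unbind */
	return 0;
}
#endif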
1600  
devm_resource_match(struct device * dev,void * res,void * data)1601  static int devm_resource_match(struct device *dev, void *res, void *data)
1602  {
1603  	struct resource **ptr = res;
1604  
1605  	return *ptr == data;
1606  }
1607  
1608  /**
1609   * devm_release_resource() - release a previously requested resource
1610   * @dev: device for which to release the resource
1611   * @new: descriptor of the resource to release
1612   *
1613   * Releases a resource previously requested using devm_request_resource().
1614   */
devm_release_resource(struct device * dev,struct resource * new)1615  void devm_release_resource(struct device *dev, struct resource *new)
1616  {
1617  	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
1618  			       new));
1619  }
1620  EXPORT_SYMBOL(devm_release_resource);
1621  
1622  struct region_devres {
1623  	struct resource *parent;
1624  	resource_size_t start;
1625  	resource_size_t n;
1626  };
1627  
devm_region_release(struct device * dev,void * res)1628  static void devm_region_release(struct device *dev, void *res)
1629  {
1630  	struct region_devres *this = res;
1631  
1632  	__release_region(this->parent, this->start, this->n);
1633  }
1634  
devm_region_match(struct device * dev,void * res,void * match_data)1635  static int devm_region_match(struct device *dev, void *res, void *match_data)
1636  {
1637  	struct region_devres *this = res, *match = match_data;
1638  
1639  	return this->parent == match->parent &&
1640  		this->start == match->start && this->n == match->n;
1641  }
1642  
1643  struct resource *
__devm_request_region(struct device * dev,struct resource * parent,resource_size_t start,resource_size_t n,const char * name)1644  __devm_request_region(struct device *dev, struct resource *parent,
1645  		      resource_size_t start, resource_size_t n, const char *name)
1646  {
1647  	struct region_devres *dr = NULL;
1648  	struct resource *res;
1649  
1650  	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
1651  			  GFP_KERNEL);
1652  	if (!dr)
1653  		return NULL;
1654  
1655  	dr->parent = parent;
1656  	dr->start = start;
1657  	dr->n = n;
1658  
1659  	res = __request_region(parent, start, n, name, 0);
1660  	if (res)
1661  		devres_add(dev, dr);
1662  	else
1663  		devres_free(dr);
1664  
1665  	return res;
1666  }
1667  EXPORT_SYMBOL(__devm_request_region);

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Reserve I/O ports or memory based on "reserve=" kernel parameter.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;
		struct resource *parent;

		if (get_option(&str, &io_start) != 2)
			break;
		if (get_option(&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;

			/*
			 * If the region starts below 0x10000, we assume it's
			 * I/O port space; otherwise assume it's memory.
			 */
			if (io_start < 0x10000) {
				res->flags = IORESOURCE_IO;
				parent = &ioport_resource;
			} else {
				res->flags = IORESOURCE_MEM;
				parent = &iomem_resource;
			}
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags |= IORESOURCE_BUSY;
			res->desc = IORES_DESC_NONE;
			res->child = NULL;
			if (request_resource(parent, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}
__setup("reserve=", reserve_setup);
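
/*
 * Example: booting with
 *
 *	reserve=0x300,16
 *
 * marks I/O ports 0x300-0x30f as a busy "reserved" region in the ioport
 * tree so that no driver can claim them; a start address of 0x10000 or
 * above would land in the iomem tree instead, per the check above. Up to
 * MAXRESERVE comma-separated start,size pairs are accepted. The values
 * here are illustrative.
 */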

/*
 * Check whether the requested addr and size span more than any single slot
 * in the iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	resource_size_t end = addr + size - 1;
	struct resource *p;
	int err = 0;

	read_lock(&resource_lock);
	for_each_resource(&iomem_resource, p, false) {
		/*
		 * We could probably skip resources without the
		 * IORESOURCE_IO attribute?
		 */
		if (p->start > end)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(end))
			continue;
		/*
		 * If a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		pr_warn("resource sanity check: requesting [mem %pa-%pa], which spans more than %s %pR\n",
			&addr, &end, p->name, p);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}
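
/*
 * Worked example (addresses are illustrative): if the tree contains a
 * non-busy resource entry covering 0x1000-0x7fff, then a request for
 * 0x7000-0x8fff crosses that entry's upper page boundary, triggers the
 * warning above and returns -1, while a request for 0x6000-0x6fff lies
 * entirely within the same pages and passes.
 */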

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * Check if an address is exclusive to the kernel and must not be mapped to
 * user space, for example, via /dev/mem.
 *
 * Returns true if exclusive to the kernel, otherwise returns false.
 */
bool resource_is_exclusive(struct resource *root, u64 addr, resource_size_t size)
{
	const unsigned int exclusive_system_ram = IORESOURCE_SYSTEM_RAM |
						  IORESOURCE_EXCLUSIVE;
	bool skip_children = false, err = false;
	struct resource *p;

	read_lock(&resource_lock);
	for_each_resource(root, p, skip_children) {
		if (p->start >= addr + size)
			break;
		if (p->end < addr) {
			skip_children = true;
			continue;
		}
		skip_children = false;

		/*
		 * IORESOURCE_SYSTEM_RAM resources are exclusive if
		 * IORESOURCE_EXCLUSIVE is set, even if they
		 * are not busy and even if "iomem=relaxed" is set. The
		 * responsible driver dynamically adds/removes system RAM within
		 * such an area and uncontrolled access is dangerous.
		 */
		if ((p->flags & exclusive_system_ram) == exclusive_system_ram) {
			err = true;
			break;
		}

		/*
		 * Otherwise, a resource is exclusive only when strict iomem
		 * checks are enabled and the resource is busy, and either
		 * CONFIG_IO_STRICT_DEVMEM is enabled or IORESOURCE_EXCLUSIVE
		 * is set.
		 */
		if (!strict_iomem_checks || !(p->flags & IORESOURCE_BUSY))
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = true;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

bool iomem_is_exclusive(u64 addr)
{
	return resource_is_exclusive(&iomem_resource, addr & PAGE_MASK,
				     PAGE_SIZE);
}

struct resource_entry *resource_list_create_entry(struct resource *res,
						  size_t extra_size)
{
	struct resource_entry *entry;

	entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL);
	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->res = res ? res : &entry->__res;
	}

	return entry;
}
EXPORT_SYMBOL(resource_list_create_entry);

void resource_list_free(struct list_head *head)
{
	struct resource_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, node)
		resource_list_destroy_entry(entry);
}
EXPORT_SYMBOL(resource_list_free);
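
/*
 * Example: building and tearing down a resource list with the helpers from
 * <linux/resource_ext.h>. Passing a NULL @res makes the entry use its
 * embedded resource. The window address and name are illustrative.
 *
 *	LIST_HEAD(resources);
 *	struct resource_entry *entry;
 *
 *	entry = resource_list_create_entry(NULL, 0);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->res->start = 0xc0000000;
 *	entry->res->end   = 0xc0ffffff;
 *	entry->res->flags = IORESOURCE_MEM;
 *	resource_list_add_tail(entry, &resources);
 *
 *	resource_list_for_each_entry(entry, &resources)
 *		pr_info("window %pR\n", entry->res);
 *
 *	resource_list_free(&resources);
 */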

#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING		(1UL << 0)
#define GFR_REQUEST_REGION	(1UL << 1)
#ifdef PA_SECTION_SHIFT
#define GFR_DEFAULT_ALIGN	(1UL << PA_SECTION_SHIFT)
#else
#define GFR_DEFAULT_ALIGN	PAGE_SIZE
#endif

static resource_size_t gfr_start(struct resource *base, resource_size_t size,
				 resource_size_t align, unsigned long flags)
{
	if (flags & GFR_DESCENDING) {
		resource_size_t end;

		end = min_t(resource_size_t, base->end, PHYSMEM_END);
		return end - size + 1;
	}

	return ALIGN(max(base->start, align), align);
}

static bool gfr_continue(struct resource *base, resource_size_t addr,
			 resource_size_t size, unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr > size && addr >= base->start;
	/*
	 * In the ascending case, be careful that the last increment by
	 * @size did not wrap past 0.
	 */
	return addr > addr - size &&
	       addr <= min_t(resource_size_t, base->end, PHYSMEM_END);
}

static resource_size_t gfr_next(resource_size_t addr, resource_size_t size,
				unsigned long flags)
{
	if (flags & GFR_DESCENDING)
		return addr - size;
	return addr + size;
}
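
/*
 * Worked example of the gfr_* helpers (numbers are illustrative): for a
 * descending scan over a @base ending at 0x3fffffff with size 0x1000000
 * (16M), gfr_start() yields 0x3f000000, the highest start at which the
 * candidate still fits below base->end; each gfr_next() step subtracts the
 * increment, and gfr_continue() stops the walk before addr underflows or
 * drops below base->start. Ascending scans start at the first aligned
 * address at or above base->start and step upward instead.
 */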

static void remove_free_mem_region(void *_res)
{
	struct resource *res = _res;

	if (res->parent)
		remove_resource(res);
	free_resource(res);
}

static struct resource *
get_free_mem_region(struct device *dev, struct resource *base,
		    resource_size_t size, const unsigned long align,
		    const char *name, const unsigned long desc,
		    const unsigned long flags)
{
	resource_size_t addr;
	struct resource *res;
	struct region_devres *dr = NULL;

	size = ALIGN(size, align);

	res = alloc_resource(GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	if (dev && (flags & GFR_REQUEST_REGION)) {
		dr = devres_alloc(devm_region_release,
				sizeof(struct region_devres), GFP_KERNEL);
		if (!dr) {
			free_resource(res);
			return ERR_PTR(-ENOMEM);
		}
	} else if (dev) {
		if (devm_add_action_or_reset(dev, remove_free_mem_region, res))
			return ERR_PTR(-ENOMEM);
	}

	write_lock(&resource_lock);
	for (addr = gfr_start(base, size, align, flags);
	     gfr_continue(base, addr, align, flags);
	     addr = gfr_next(addr, align, flags)) {
		if (__region_intersects(base, addr, size, 0, IORES_DESC_NONE) !=
		    REGION_DISJOINT)
			continue;

		if (flags & GFR_REQUEST_REGION) {
			if (__request_region_locked(res, &iomem_resource, addr,
						    size, name, 0))
				break;

			if (dev) {
				dr->parent = &iomem_resource;
				dr->start = addr;
				dr->n = size;
				devres_add(dev, dr);
			}

			res->desc = desc;
			write_unlock(&resource_lock);

			/*
			 * A driver is claiming this region so revoke any
			 * mappings.
			 */
			revoke_iomem(res);
		} else {
			res->start = addr;
			res->end = addr + size - 1;
			res->name = name;
			res->desc = desc;
			res->flags = IORESOURCE_MEM;

			/*
			 * Only succeed if the resource hosts an exclusive
			 * range after the insert.
			 */
			if (__insert_resource(base, res) || res->child)
				break;

			write_unlock(&resource_lock);
		}

		return res;
	}
	write_unlock(&resource_lock);

	if (flags & GFR_REQUEST_REGION) {
		free_resource(res);
		devres_free(dr);
	} else if (dev)
		devm_release_action(dev, remove_free_mem_region, res);

	return ERR_PTR(-ERANGE);
}

/**
 * devm_request_free_mem_region - find free region for device private memory
 *
 * @dev: device struct to bind the resource to
 * @base: resource tree to look in
 * @size: size in bytes of the device memory to add
 *
 * This function tries to find an empty range of physical address space big
 * enough to contain the new resource, so that it can later be hotplugged as
 * ZONE_DEVICE memory, which in turn allocates struct pages.
 */
struct resource *devm_request_free_mem_region(struct device *dev,
		struct resource *base, unsigned long size)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(dev, base, size, GFR_DEFAULT_ALIGN,
				   dev_name(dev),
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(devm_request_free_mem_region);
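
/*
 * Example: device-private memory setup in the style of HMM users. The pgmap
 * fragment is an abbreviated sketch; only the devm_request_free_mem_region()
 * call is the API defined above.
 *
 *	res = devm_request_free_mem_region(dev, &iomem_resource, size);
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 */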

struct resource *request_free_mem_region(struct resource *base,
		unsigned long size, const char *name)
{
	unsigned long flags = GFR_DESCENDING | GFR_REQUEST_REGION;

	return get_free_mem_region(NULL, base, size, GFR_DEFAULT_ALIGN, name,
				   IORES_DESC_DEVICE_PRIVATE_MEMORY, flags);
}
EXPORT_SYMBOL_GPL(request_free_mem_region);

/**
 * alloc_free_mem_region - find a free region relative to @base
 * @base: resource that will parent the new resource
 * @size: size in bytes of memory to allocate from @base
 * @align: alignment requirements for the allocation
 * @name: resource name
 *
 * Buses like CXL, that can dynamically instantiate new memory regions,
 * need a method to allocate physical address space for those regions.
 * Allocate and insert a new resource to cover a free range in the span of
 * @base that is not claimed by any descendant of @base.
 */
struct resource *alloc_free_mem_region(struct resource *base,
				       unsigned long size, unsigned long align,
				       const char *name)
{
	/* Default of ascending direction and insert resource */
	unsigned long flags = 0;

	return get_free_mem_region(NULL, base, size, align, name,
				   IORES_DESC_NONE, flags);
}
EXPORT_SYMBOL_GPL(alloc_free_mem_region);
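
/*
 * Example: carving a window out of a parent range, CXL-style. Here
 * "parent_res" stands for a hypothetical parent window resource, and the
 * size, alignment and name are illustrative.
 *
 *	res = alloc_free_mem_region(parent_res, size, SZ_256M, "region0");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 */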
#endif /* CONFIG_GET_FREE_REGION */

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}
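
/*
 * Example: booting with "iomem=relaxed" clears strict_iomem_checks and so
 * relaxes the /dev/mem checks in resource_is_exclusive() above, while
 * "iomem=strict" restores them; with CONFIG_STRICT_DEVMEM the strict
 * behaviour is already the default.
 */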

static int iomem_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, DEVMEM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type iomem_fs_type = {
	.name		= "iomem",
	.owner		= THIS_MODULE,
	.init_fs_context = iomem_fs_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static int __init iomem_init_inode(void)
{
	static struct vfsmount *iomem_vfs_mount;
	static int iomem_fs_cnt;
	struct inode *inode;
	int rc;

	rc = simple_pin_fs(&iomem_fs_type, &iomem_vfs_mount, &iomem_fs_cnt);
	if (rc < 0) {
		pr_err("Cannot mount iomem pseudo filesystem: %d\n", rc);
		return rc;
	}

	inode = alloc_anon_inode(iomem_vfs_mount->mnt_sb);
	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		pr_err("Cannot allocate inode for iomem: %d\n", rc);
		simple_release_fs(&iomem_vfs_mount, &iomem_fs_cnt);
		return rc;
	}

	/*
	 * Publish that the iomem revocation inode is initialized.
	 * Pairs with smp_load_acquire() in revoke_iomem().
	 */
	smp_store_release(&iomem_inode, inode);

	return 0;
}

fs_initcall(iomem_init_inode);

__setup("iomem=", strict_iomem);