// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>

#include "mtdcore.h"

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static inline void free_partition(struct mtd_info *mtd)
{
	kfree(mtd->name);
	kfree(mtd);
}

void release_mtd_partition(struct mtd_info *mtd)
{
	WARN_ON(!list_empty(&mtd->part.node));
	free_partition(mtd);
}

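/*
 * Build (but do not register) the mtd_info describing one partition of
 * @parent: resolve the MTDPART_OFS_* and MTDPART_SIZ_FULL placeholders
 * against @cur_offset and the parent size, pick a suitable erase size, and
 * force misaligned partitions read-only. Returns the new child, or an
 * ERR_PTR() on allocation failure.
 */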
static struct mtd_info *allocate_partition(struct mtd_info *parent,
					   const struct mtd_partition *part,
					   int partno, uint64_t cur_offset)
{
	struct mtd_info *master = mtd_get_master(parent);
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ?
			   master->writesize : master->erasesize;
	u64 parent_size = mtd_is_partition(parent) ?
			  parent->part.size : parent->size;
	struct mtd_info *child;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	child = kzalloc(sizeof(*child), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !child) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(child);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	child->type = parent->type;
	child->part.flags = parent->flags & ~part->mask_flags;
	child->part.flags |= part->add_flags;
	child->flags = child->part.flags;
	child->part.size = part->size;
	child->writesize = parent->writesize;
	child->writebufsize = parent->writebufsize;
	child->oobsize = parent->oobsize;
	child->oobavail = parent->oobavail;
	child->subpage_sft = parent->subpage_sft;

	child->name = name;
	child->owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the parent and its partitions in sysfs.
	 */
	child->dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
			    &parent->dev : parent->dev.parent;
	child->dev.of_node = part->of_node;
	child->parent = parent;
	child->part.offset = part->offset;
	INIT_LIST_HEAD(&child->partitions);

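	/*
	 * Resolve the offset/size placeholders: MTDPART_OFS_APPEND places the
	 * partition right after the previous one, MTDPART_OFS_NXTBLK does the
	 * same but rounds up to the next erase/write block, MTDPART_OFS_RETAIN
	 * takes everything from the current offset except the last part->size
	 * bytes of the parent, and MTDPART_SIZ_FULL extends the partition to
	 * the end of the parent.
	 */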
	if (child->part.offset == MTDPART_OFS_APPEND)
		child->part.offset = cur_offset;
	if (child->part.offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		child->part.offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			child->part.offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset,
			       child->part.offset);
		}
	}
	if (child->part.offset == MTDPART_OFS_RETAIN) {
		child->part.offset = cur_offset;
		if (parent_size - child->part.offset >= child->part.size) {
			child->part.size = parent_size - child->part.offset -
					   child->part.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent_size - child->part.offset,
				child->part.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (child->part.size == MTDPART_SIZ_FULL)
		child->part.size = parent_size - child->part.offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n",
	       child->part.offset, child->part.offset + child->part.size,
	       child->name);

	/* let's do some sanity checks */
	if (child->part.offset >= parent_size) {
		/* let's register it anyway to preserve ordering */
		child->part.offset = 0;
		child->part.size = 0;

		/* Initialize ->erasesize to make add_mtd_device() happy. */
		child->erasesize = parent->erasesize;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (child->part.offset + child->part.size > parent->size) {
		child->part.size = parent_size - child->part.offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, child->part.size);
	}

	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = child->part.offset + child->part.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= child->part.offset;
		     i++)
			;
		/* The loop stopped one region past the one containing the
		 * partition start, so step back. */
		if (i > 0)
			i--;
		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (child->erasesize < regions[i].erasesize)
				child->erasesize = regions[i].erasesize;
		}
		BUG_ON(child->erasesize == 0);
	} else {
		/* Single erase size */
		child->erasesize = master->erasesize;
	}

	/*
	 * Child erasesize might differ from the parent one if the parent
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(child->flags & MTD_NO_ERASE))
		wr_alignment = child->erasesize;

	tmp = mtd_get_master_ofs(child, 0);
	remainder = do_div(tmp, wr_alignment);
	if ((child->flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		child->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = mtd_get_master_ofs(child, 0) + child->part.size;
	remainder = do_div(tmp, wr_alignment);
	if ((child->flags & MTD_WRITEABLE) && remainder) {
		child->flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
			part->name);
	}

	child->size = child->part.size;
	child->ecc_step_size = parent->ecc_step_size;
	child->ecc_strength = parent->ecc_strength;
	child->bitflip_threshold = parent->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < child->part.size) {
			if (mtd_block_isreserved(child, offs))
				child->ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(child, offs))
				child->ecc_stats.badblocks++;
			offs += child->erasesize;
		}
	}

out_register:
	return child;
}

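/*
 * Per-partition sysfs attribute exposing the partition's offset within its
 * parent device (typically visible as /sys/class/mtd/mtdX/offset).
 */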
static ssize_t offset_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lld\n", mtd->part.offset);
}
static DEVICE_ATTR_RO(offset);	/* mtd partition offset */

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_info *new)
{
	int ret = sysfs_create_files(&new->dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

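/*
 * Add a single partition to @parent at run time. @offset must be an absolute
 * offset within the parent (relative placeholders such as MTDPART_OFS_APPEND
 * are rejected); @length may be MTDPART_SIZ_FULL to use all remaining space.
 *
 * Illustrative call (caller names are hypothetical, not from this file):
 *
 *	err = mtd_add_partition(parent_mtd, "user", SZ_1M, MTDPART_SIZ_FULL);
 */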
int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_info *master = mtd_get_master(parent);
	u64 parent_size = mtd_is_partition(parent) ?
			  parent->part.size : parent->size;
	struct mtd_partition part;
	struct mtd_info *child;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent_size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	child = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(child))
		return PTR_ERR(child);

	mutex_lock(&master->master.partitions_lock);
	list_add_tail(&child->part.node, &parent->partitions);
	mutex_unlock(&master->master.partitions_lock);

	ret = add_mtd_device(child);
	if (ret)
		goto err_remove_part;

	mtd_add_partition_attrs(child);

	return 0;

err_remove_part:
	mutex_lock(&master->master.partitions_lock);
	list_del(&child->part.node);
	mutex_unlock(&master->master.partitions_lock);

	free_partition(child);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @mtd: MTD structure to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_info *mtd)
{
	struct mtd_info *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
		err = __mtd_del_partition(child);
		if (err)
			return err;
	}

	sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);

	list_del_init(&mtd->part.node);
	err = del_mtd_device(mtd);
	if (err)
		return err;

	return 0;
}

/*
 * This function unregisters and destroys all child MTD objects which are
 * attached to the given MTD object, recursively.
 */
static int __del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_info *child, *next;
	int ret, err = 0;

	list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
		if (mtd_has_partitions(child))
			__del_mtd_partitions(child);

		pr_info("Deleting %s MTD partition\n", child->name);
		list_del_init(&child->part.node);
		ret = del_mtd_device(child);
		if (ret < 0) {
			pr_err("Error when deleting partition \"%s\" (%d)\n",
			       child->name, ret);
			err = ret;
			continue;
		}
	}

	return err;
}

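/*
 * Delete every partition registered below @mtd, recursively, under the
 * master's partition lock. Returns 0 on success or a negative error code if
 * any child could not be removed.
 */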
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	pr_info("Deleting MTD partitions on \"%s\":\n", mtd->name);

	mutex_lock(&master->master.partitions_lock);
	ret = __del_mtd_partitions(mtd);
	mutex_unlock(&master->master.partitions_lock);

	return ret;
}

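/*
 * Delete the child partition of @mtd whose device index matches @partno
 * (and, recursively, any of its subpartitions). Returns -EINVAL if no such
 * child exists.
 *
 * Illustrative call (hypothetical caller):
 *
 *	err = mtd_del_partition(parent_mtd, child_mtd->index);
 */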
int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_info *child, *master = mtd_get_master(mtd);
	int ret = -EINVAL;

	mutex_lock(&master->master.partitions_lock);
	list_for_each_entry(child, &mtd->partitions, part.node) {
		if (child->index == partno) {
			ret = __mtd_del_partition(child);
			break;
		}
	}
	mutex_unlock(&master->master.partitions_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a parent MTD object and a partition table, creates
 * and registers the child MTD objects which are bound to the parent according
 * to the partition definitions.
 *
 * For historical reasons, this function's caller only registers the parent
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

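/*
 * Illustrative caller-side partition table (names and sizes are hypothetical;
 * real tables come from parsers or board code):
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot",   .offset = 0,                  .size = SZ_256K },
 *		{ .name = "env",    .offset = MTDPART_OFS_APPEND, .size = SZ_64K },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
 */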
int add_mtd_partitions(struct mtd_info *parent,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_info *child, *master = mtd_get_master(parent);
	uint64_t cur_offset = 0;
	int i, ret;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n",
	       nbparts, parent->name);

	for (i = 0; i < nbparts; i++) {
		child = allocate_partition(parent, parts + i, i, cur_offset);
		if (IS_ERR(child)) {
			ret = PTR_ERR(child);
			goto err_del_partitions;
		}

		mutex_lock(&master->master.partitions_lock);
		list_add_tail(&child->part.node, &parent->partitions);
		mutex_unlock(&master->master.partitions_lock);

		ret = add_mtd_device(child);
		if (ret) {
			mutex_lock(&master->master.partitions_lock);
			list_del(&child->part.node);
			mutex_unlock(&master->master.partitions_lock);

			free_partition(child);
			goto err_del_partitions;
		}

		mtd_add_partition_attrs(child);

		/* Look for subpartitions */
		ret = parse_mtd_partitions(child, parts[i].types, NULL);
		if (ret < 0) {
			pr_err("Failed to parse subpartitions: %d\n", ret);
			goto err_del_partitions;
		}

		cur_offset = child->part.offset + child->part.size;
	}

	return 0;

err_del_partitions:
	del_mtd_partitions(master);

	return ret;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

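/*
 * Illustrative parser registration (the parser and its parse function are
 * hypothetical; real parsers live under drivers/mtd/parsers/). Modules
 * normally use the register_mtd_parser() wrapper, which supplies THIS_MODULE:
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name     = "examplepart",
 *		.parse_fn = examplepart_parse,
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */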
int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
	"ofpart",
	NULL
};

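/*
 * Run a single parser against @master. Returns the number of partitions
 * found (> 0), 0 if the parser found nothing, or a negative error code. On
 * success the result and the owning parser are stashed in @pparts; the
 * caller keeps the module reference until mtd_part_parser_cleanup().
 */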
static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 *
 * @compat: compatible string describing partitions in a device tree
 *
 * MTD parsers can specify supported partitions by providing a table of
 * compatibility strings. This function finds a parser that advertises support
 * for a passed value of "compatible".
 */
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list) {
		const struct of_device_id *matches;

		matches = p->of_match_table;
		if (!matches)
			continue;

		for (; matches->compatible[0]; matches++) {
			if (!strcmp(matches->compatible, compat) &&
			    try_module_get(p->owner)) {
				ret = p;
				break;
			}
		}

		if (ret)
			break;
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

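/*
 * Handle the "ofpart" pseudo-type: look up the "partitions" subnode (or the
 * partition's own node), try every registered parser whose of_match_table
 * matches the node's "compatible" strings, and finally fall back to the
 * "fixed-partitions" parser for the legacy binding that has no compatible
 * string at all.
 */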
static int mtd_part_of_parse(struct mtd_info *master,
			     struct mtd_partitions *pparts)
{
	struct mtd_part_parser *parser;
	struct device_node *np;
	struct device_node *child;
	struct property *prop;
	struct device *dev;
	const char *compat;
	const char *fixed = "fixed-partitions";
	int ret, err = 0;

	dev = &master->dev;
	/* Use parent device (controller) if the top level MTD is not registered */
	if (!IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) && !mtd_is_partition(master))
		dev = master->dev.parent;

	np = mtd_get_of_node(master);
	if (mtd_is_partition(master))
		of_node_get(np);
	else
		np = of_get_child_by_name(np, "partitions");

	/*
	 * Don't create devices that are added to a bus but will never get
	 * probed. That'll cause fw_devlink to block probing of consumers of
	 * this partition until the partition device is probed.
	 */
	for_each_child_of_node(np, child)
		if (of_device_is_compatible(child, "nvmem-cells"))
			of_node_set_flag(child, OF_POPULATED);

	of_property_for_each_string(np, "compatible", prop, compat) {
		parser = mtd_part_get_compatible_parser(compat);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0) {
			of_platform_populate(np, NULL, NULL, dev);
			of_node_put(np);
			return ret;
		}
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}
	of_platform_populate(np, NULL, NULL, dev);
	of_node_put(np);

	/*
	 * For backward compatibility we have to try the "fixed-partitions"
	 * parser. It supports the old DT format with partitions specified as
	 * direct subnodes of a flash device node, without any compatible
	 * string we could have matched above.
	 */
	parser = mtd_part_parser_get(fixed);
	if (!parser && !request_module("%s", fixed))
		parser = mtd_part_parser_get(fixed);
	if (parser) {
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0)
			return ret;
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}

	return err;
}

/**
 * parse_mtd_partitions - parse and register MTD partitions
 *
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @data: MTD partition parser-specific data
 *
 * This function tries to find & register partitions on MTD device @master. It
 * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers at the moment.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o number of found partitions otherwise
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_part_parser_data *data)
{
	struct mtd_partitions pparts = { };
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = mtd_is_partition(master) ? default_subpartition_types :
			default_mtd_part_types;

	for ( ; *types; types++) {
		/*
		 * ofpart is a special type that means OF partitioning info
		 * should be used. It requires slightly different logic, so it
		 * is handled in a separate function.
		 */
		if (!strcmp(*types, "ofpart")) {
			ret = mtd_part_of_parse(master, &pparts);
		} else {
			pr_debug("%s: parsing partitions %s\n", master->name,
				 *types);
			parser = mtd_part_parser_get(*types);
			if (!parser && !request_module("%s", *types))
				parser = mtd_part_parser_get(*types);
			pr_debug("%s: got parser %s\n", master->name,
				parser ? parser->name : NULL);
			if (!parser)
				continue;
			ret = mtd_part_do_parse(parser, master, &pparts, data);
			if (ret <= 0)
				mtd_part_parser_put(parser);
		}
		/* Found partitions! */
		if (ret > 0) {
			err = add_mtd_partitions(master, pparts.parts,
						 pparts.nr_parts);
			mtd_part_parser_cleanup(&pparts);
			return err ? err : pparts.nr_parts;
		}
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

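/*
 * Release the partition array handed back by a parser, using the parser's
 * cleanup hook, and drop the module reference taken when the parser was
 * looked up.
 */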
void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master((struct mtd_info *)mtd);

	return master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);