// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>
#include "internal.h"

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for it to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set, with s_umount
 *         held. The function returns false if SB_DYING was set, with
 *         s_umount not held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}

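/*
 * Illustrative sketch (not part of the kernel source): the expected
 * caller pattern for super_lock(). walk_one_sb() and do_something()
 * are hypothetical names; the temporary reference taken under sb_lock
 * is exactly what iterate_supers() and user_get_super() below do.
 *
 *	static void walk_one_sb(struct super_block *sb)
 *	{
 *		spin_lock(&sb_lock);
 *		sb->s_count++;			// temporary reference
 *		spin_unlock(&sb_lock);
 *
 *		if (super_lock_shared(sb)) {	// waits for SB_BORN/SB_DYING
 *			if (sb->s_root)
 *				do_something(sb);
 *			super_unlock_shared(sb);
 *		}
 *		put_super(sb);			// drop temporary reference
 *	}
 */
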
/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by the user
	 * seeing SB_BORN set.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends.
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

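/*
 * Worked example for the proportioning above (illustrative numbers
 * only): with sc->nr_to_scan = 128, dentries = 600, inodes = 300 and
 * fs_objects = 100, total_objects is 600 + 300 + 100 + 1 = 1001 and
 * mult_frac() splits the scan roughly proportionally:
 *
 *	dentries   = mult_frac(128, 600, 1001);	// 76
 *	inodes     = mult_frac(128, 300, 1001);	// 38
 *	fs_objects = mult_frac(128, 100, 1001);	// 12
 *
 * mult_frac(x, n, d) evaluates x * n / d without the intermediate
 * overflow a plain multiplication could hit, and the "+ 1" added to
 * each nr_to_scan afterwards guarantees forward progress even when a
 * share rounds down to zero.
 */
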
static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	fsnotify_sb_free(s);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	for (int i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to a new superblock or %NULL if allocation failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 *	put_super	-	drop a temporary reference to superblock
 *	@sb: superblock in question
 *
 *	Drops a temporary reference, frees superblock if there are no
 *	references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * for SB_DEAD.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left.  In that case we
 *	tell the fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

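/*
 * Illustrative sketch: the usual error-path pairing with sget_fc() or
 * sget(). Both return the superblock with s_umount held exclusively and
 * one active reference, so a failed setup attempt is unwound with
 * deactivate_locked_super(), which also drops s_umount. my_fill_super()
 * is a hypothetical stand-in; vfs_get_super() below follows this shape.
 *
 *	sb = sget_fc(fc, test, set);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	err = my_fill_super(sb, fc);
 *	if (err) {
 *		deactivate_locked_super(sb);
 *		return err;
 *	}
 */
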
/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 *	super_trylock_shared - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and
 *	filesystem already got into shutdown, and returns true with the s_umount
 *	lock held in read mode in case of success. On successful return,
 *	the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There are a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}

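/*
 * Illustrative sketch: super_trylock_shared() suits contexts such as
 * shrinkers or writeback that may already hold fs locks and therefore
 * must not sleep on s_umount. try_work_on_sb() is a hypothetical name;
 * super_cache_scan() above is the in-file user of this pattern.
 *
 *	static void try_work_on_sb(struct super_block *sb)
 *	{
 *		if (!super_trylock_shared(sb))
 *			return;		// contended or shutting down, skip
 *		// ... sb is SB_BORN, alive and pinned against shutdown ...
 *		super_unlock_shared(sb);
 *	}
 */
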
/**
 *	retire_super	-	prevents superblock from being reused
 *	@sb: superblock to retire
 *
 *	The function marks superblock to be ignored in superblock test, which
 *	prevents it from being reused for any new mounts.  If the superblock has
 *	a private bdi, it also unregisters it, but doesn't reduce the refcount
 *	of the superblock to prevent potential races.  The refcount is reduced
 *	by generic_shutdown_super().  The function cannot be called
 *	concurrently with generic_shutdown_super().  It is safe to call the
 *	function multiple times, subsequent calls have no effect.
 *
 *	The marker will affect the re-use only for block-device-based
 *	superblocks.  Other superblocks will still get marked if this function
 *	is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount(sb);

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}

EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace of the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *         returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

	/*
	 * Never allow s_user_ns != &init_user_ns when FS_USERNS_MOUNT is
	 * not set, as the filesystem is likely unprepared to handle it.
	 * This can happen when fsconfig() is called from init_user_ns with
	 * an fs_fd opened in another user namespace.
	 */
	if (user_ns != &init_user_ns && !(fc->fs_type->fs_flags & FS_USERNS_MOUNT)) {
		errorfc(fc, "VFS: Mounting from non-initial user namespace is not allowed");
		return ERR_PTR(-EPERM);
	}

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);

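/*
 * Illustrative sketch: minimal @test and @set callbacks a filesystem
 * might hand to sget_fc(). The mytype_* names are hypothetical; compare
 * test_keyed_super() and set_anon_super_fc() further down for the
 * in-tree equivalents.
 *
 *	static int mytype_test_super(struct super_block *sb,
 *				     struct fs_context *fc)
 *	{
 *		return sb->s_fs_info == fc->s_fs_info;
 *	}
 *
 *	static int mytype_set_super(struct super_block *sb,
 *				    struct fs_context *fc)
 *	{
 *		return get_anon_bdev(&sb->s_dev);
 *	}
 */
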
/**
 *	sget	-	find or create a superblock
 *	@type:	  filesystem type superblock should belong to
 *	@test:	  comparison callback
 *	@set:	  setup callback
 *	@flags:	  mount flags
 *	@data:	  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

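/*
 * Illustrative sketch: a minimal iterate_supers() caller. count_one()
 * is a hypothetical callback; it runs with s_umount held shared and
 * sb->s_root guaranteed non-NULL, so touching the superblock is safe.
 *
 *	static void count_one(struct super_block *sb, void *arg)
 *	{
 *		(*(unsigned long *)arg)++;
 *	}
 *
 *	unsigned long nr_supers = 0;
 *	iterate_supers(count_one, &nr_supers);
 */
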
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			bool locked;

			sb->s_count++;
			spin_unlock(&sb_lock);
			/* still alive? */
			locked = super_lock(sb, excl);
			if (locked) {
				if (sb->s_root)
					return sb;
				super_unlock(sb, excl);
			}
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			break;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	bool locked = super_lock_excl(sb);

	if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	if (locked)
		super_unlock_excl(sb);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	bool locked = super_lock_excl(sb);

	if (locked && sb->s_root) {
		if (IS_ENABLED(CONFIG_BLOCK))
			while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
				pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
		thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
		return;
	}
	if (locked)
		super_unlock_excl(sb);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

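/*
 * Illustrative sketch: a virtual (no backing device) filesystem wires
 * set_anon_super() in as the sget() @set callback so every instance
 * gets a distinct anonymous dev_t, and pairs it with kill_anon_super()
 * as its ->kill_sb(). mount_nodev() below packages this sequence.
 *
 *	s = sget(fs_type, NULL, set_anon_super, flags, NULL);
 *
 *	// in struct file_system_type:
 *	// .kill_sb = kill_anon_super,
 */
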
void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);

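/*
 * Illustrative sketch: how a filesystem's ->get_tree() might pick one of
 * the helpers above. mynet_fill_super() is a hypothetical fill routine;
 * keying on a namespace pointer is the typical get_tree_keyed() use, so
 * that mounts sharing the key share one superblock.
 *
 *	static int mynet_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_keyed(fc, mynet_fill_super, fc->net_ns);
 *	}
 */
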
set_bdev_super(struct super_block * s,void * data)1321  static int set_bdev_super(struct super_block *s, void *data)
1322  {
1323  	s->s_dev = *(dev_t *)data;
1324  	return 0;
1325  }
1326  
super_s_dev_set(struct super_block * s,struct fs_context * fc)1327  static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
1328  {
1329  	return set_bdev_super(s, fc->sget_key);
1330  }
1331  
super_s_dev_test(struct super_block * s,struct fs_context * fc)1332  static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
1333  {
1334  	return !(s->s_iflags & SB_I_RETIRED) &&
1335  		s->s_dev == *(dev_t *)fc->sget_key;
1336  }
1337  
1338  /**
1339   * sget_dev - Find or create a superblock by device number
1340   * @fc: Filesystem context.
1341   * @dev: device number
1342   *
1343   * Find or create a superblock using the provided device number that
1344   * will be stored in fc->sget_key.
1345   *
1346   * If an extant superblock is matched, then that will be returned with
1347   * an elevated reference count that the caller must transfer or discard.
1348   *
1349   * If no match is made, a new superblock will be allocated and basic
1350   * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
1351   * be set). The superblock will be published and it will be returned in
1352   * a partially constructed state with SB_BORN and SB_ACTIVE as yet
1353   * unset.
1354   *
1355   * Return: an existing or newly created superblock on success, an error
1356   *         pointer on failure.
1357   */
sget_dev(struct fs_context * fc,dev_t dev)1358  struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
1359  {
1360  	fc->sget_key = &dev;
1361  	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
1362  }
1363  EXPORT_SYMBOL(sget_dev);
1364  
1365  #ifdef CONFIG_BLOCK
1366  /*
1367   * Lock the superblock that is holder of the bdev. Returns the superblock
1368   * pointer if we successfully locked the superblock and it is alive. Otherwise
1369   * we return NULL and just unlock bdev->bd_holder_lock.
1370   *
1371   * The function must be called with bdev->bd_holder_lock and releases it.
1372   */
bdev_super_lock(struct block_device * bdev,bool excl)1373  static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
1374  	__releases(&bdev->bd_holder_lock)
1375  {
1376  	struct super_block *sb = bdev->bd_holder;
1377  	bool locked;
1378  
1379  	lockdep_assert_held(&bdev->bd_holder_lock);
1380  	lockdep_assert_not_held(&sb->s_umount);
1381  	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);
1382  
1383  	/* Make sure sb doesn't go away from under us */
1384  	spin_lock(&sb_lock);
1385  	sb->s_count++;
1386  	spin_unlock(&sb_lock);
1387  
1388  	mutex_unlock(&bdev->bd_holder_lock);
1389  
1390  	locked = super_lock(sb, excl);
1391  
1392  	/*
1393  	 * If the superblock wasn't already SB_DYING then we hold
1394  	 * s_umount and can safely drop our temporary reference.
1395           */
1396  	put_super(sb);
1397  
1398  	if (!locked)
1399  		return NULL;
1400  
1401  	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
1402  		super_unlock(sb, excl);
1403  		return NULL;
1404  	}
1405  
1406  	return sb;
1407  }
1408  
fs_bdev_mark_dead(struct block_device * bdev,bool surprise)1409  static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
1410  {
1411  	struct super_block *sb;
1412  
1413  	sb = bdev_super_lock(bdev, false);
1414  	if (!sb)
1415  		return;
1416  
1417  	if (!surprise)
1418  		sync_filesystem(sb);
1419  	shrink_dcache_sb(sb);
1420  	invalidate_inodes(sb);
1421  	if (sb->s_op->shutdown)
1422  		sb->s_op->shutdown(sb);
1423  
1424  	super_unlock_shared(sb);
1425  }
1426  
fs_bdev_sync(struct block_device * bdev)1427  static void fs_bdev_sync(struct block_device *bdev)
1428  {
1429  	struct super_block *sb;
1430  
1431  	sb = bdev_super_lock(bdev, false);
1432  	if (!sb)
1433  		return;
1434  
1435  	sync_filesystem(sb);
1436  	super_unlock_shared(sb);
1437  }
1438  
get_bdev_super(struct block_device * bdev)1439  static struct super_block *get_bdev_super(struct block_device *bdev)
1440  {
1441  	bool active = false;
1442  	struct super_block *sb;
1443  
1444  	sb = bdev_super_lock(bdev, true);
1445  	if (sb) {
1446  		active = atomic_inc_not_zero(&sb->s_active);
1447  		super_unlock_excl(sb);
1448  	}
1449  	if (!active)
1450  		return NULL;
1451  	return sb;
1452  }
1453  
1454  /**
1455   * fs_bdev_freeze - freeze owning filesystem of block device
1456   * @bdev: block device
1457   *
1458   * Freeze the filesystem that owns this block device if it is still
1459   * active.
1460   *
1461   * A filesystem that owns multiple block devices may be frozen from each
1462   * block device and won't be unfrozen until all block devices are
1463   * unfrozen. Each block device can only freeze the filesystem once as we
1464   * nest freezes for block devices in the block layer.
1465   *
1466   * Return: If the freeze was successful zero is returned. If the freeze
1467   *         failed a negative error code is returned.
1468   */
fs_bdev_freeze(struct block_device * bdev)1469  static int fs_bdev_freeze(struct block_device *bdev)
1470  {
1471  	struct super_block *sb;
1472  	int error = 0;
1473  
1474  	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
1475  
1476  	sb = get_bdev_super(bdev);
1477  	if (!sb)
1478  		return -EINVAL;
1479  
1480  	if (sb->s_op->freeze_super)
1481  		error = sb->s_op->freeze_super(sb,
1482  				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
1483  	else
1484  		error = freeze_super(sb,
1485  				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
1486  	if (!error)
1487  		error = sync_blockdev(bdev);
1488  	deactivate_super(sb);
1489  	return error;
1490  }
1491  
1492  /**
1493   * fs_bdev_thaw - thaw owning filesystem of block device
1494   * @bdev: block device
1495   *
1496   * Thaw the filesystem that owns this block device.
1497   *
1498   * A filesystem that owns multiple block devices may be frozen from each
1499   * block device and won't be unfrozen until all block devices are
1500   * unfrozen. Each block device can only freeze the filesystem once as we
1501   * nest freezes for block devices in the block layer.
1502   *
1503   * Return: If the thaw was successful zero is returned. If the thaw
1504   *         failed a negative error code is returned. If this function
1505   *         returns zero it doesn't mean that the filesystem is unfrozen
1506   *         as it may have been frozen multiple times (kernel may hold a
1507   *         freeze or might be frozen from other block devices).
1508   */
fs_bdev_thaw(struct block_device * bdev)1509  static int fs_bdev_thaw(struct block_device *bdev)
1510  {
1511  	struct super_block *sb;
1512  	int error;
1513  
1514  	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);
1515  
1516  	/*
1517  	 * The block device may have been frozen before it was claimed by a
1518  	 * filesystem. Concurrently another process might try to mount that
1519  	 * frozen block device and has temporarily claimed the block device for
1520  	 * that purpose causing a concurrent fs_bdev_thaw() to end up here. The
1521  	 * mounter is already about to abort mounting because they still saw an
1522  	 * elevanted bdev->bd_fsfreeze_count so get_bdev_super() will return
1523  	 * NULL in that case.
1524  	 */
1525  	sb = get_bdev_super(bdev);
1526  	if (!sb)
1527  		return -EINVAL;
1528  
1529  	if (sb->s_op->thaw_super)
1530  		error = sb->s_op->thaw_super(sb,
1531  				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
1532  	else
1533  		error = thaw_super(sb,
1534  				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
1535  	deactivate_super(sb);
1536  	return error;
1537  }
1538  
1539  const struct blk_holder_ops fs_holder_ops = {
1540  	.mark_dead		= fs_bdev_mark_dead,
1541  	.sync			= fs_bdev_sync,
1542  	.freeze			= fs_bdev_freeze,
1543  	.thaw			= fs_bdev_thaw,
1544  };
1545  EXPORT_SYMBOL_GPL(fs_holder_ops);
1546  
setup_bdev_super(struct super_block * sb,int sb_flags,struct fs_context * fc)1547  int setup_bdev_super(struct super_block *sb, int sb_flags,
1548  		struct fs_context *fc)
1549  {
1550  	blk_mode_t mode = sb_open_mode(sb_flags);
1551  	struct file *bdev_file;
1552  	struct block_device *bdev;
1553  
1554  	bdev_file = bdev_file_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
1555  	if (IS_ERR(bdev_file)) {
1556  		if (fc)
1557  			errorf(fc, "%s: Can't open blockdev", fc->source);
1558  		return PTR_ERR(bdev_file);
1559  	}
1560  	bdev = file_bdev(bdev_file);
1561  
1562  	/*
1563  	 * This really should be in blkdev_get_by_dev, but right now can't due
1564  	 * to legacy issues that require us to allow opening a block device node
1565  	 * writable from userspace even for a read-only block device.
1566  	 */
1567  	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
1568  		bdev_fput(bdev_file);
1569  		return -EACCES;
1570  	}
1571  
1572  	/*
1573  	 * It is enough to check bdev was not frozen before we set
1574  	 * s_bdev as freezing will wait until SB_BORN is set.
1575  	 */
1576  	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
1577  		if (fc)
1578  			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
1579  		bdev_fput(bdev_file);
1580  		return -EBUSY;
1581  	}
1582  	spin_lock(&sb_lock);
1583  	sb->s_bdev_file = bdev_file;
1584  	sb->s_bdev = bdev;
1585  	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
1586  	if (bdev_stable_writes(bdev))
1587  		sb->s_iflags |= SB_I_STABLE_WRITES;
1588  	spin_unlock(&sb_lock);
1589  
1590  	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
1591  	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
1592  				sb->s_id);
1593  	sb_set_blocksize(sb, block_size(bdev));
1594  	return 0;
1595  }
1596  EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev_flags - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 * @flags: GET_TREE_BDEV_* flags
 */
int get_tree_bdev_flags(struct fs_context *fc,
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc), unsigned int flags)
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		if (!(flags & GET_TREE_BDEV_QUIET_LOOKUP))
			errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}
	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL_GPL(get_tree_bdev_flags);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	return get_tree_bdev_flags(fc, fill_super, 0);
}
EXPORT_SYMBOL(get_tree_bdev);
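
/*
 * Example (illustrative sketch): a hypothetical "foofs" wiring
 * get_tree_bdev() into its fs_context operations; foofs_fill_super(),
 * foofs_free_fc() and foofs_parse_param() are assumed helpers.
 *
 *	static int foofs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, foofs_fill_super);
 *	}
 *
 *	static const struct fs_context_operations foofs_context_ops = {
 *		.free		= foofs_free_fc,
 *		.parse_param	= foofs_parse_param,
 *		.get_tree	= foofs_get_tree,
 *	};
 */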

static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;
	dev_t dev;

	error = lookup_bdev(dev_name, &dev);
	if (error)
		return ERR_PTR(error);

	flags |= SB_NOSEC;
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
		}
	} else {
		error = setup_bdev_super(s, flags, NULL);
		if (!error)
			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}

		s->s_flags |= SB_ACTIVE;
	}

	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);
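
/*
 * Example (illustrative sketch): a legacy filesystem plugging mount_bdev()
 * and kill_block_super() (below) into its file_system_type. "foofs" and
 * foofs_fill_super() are hypothetical.
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  foofs_fill_super);
 *	}
 *
 *	static struct file_system_type foofs_fs_type = {
 *		.owner		= THIS_MODULE,
 *		.name		= "foofs",
 *		.mount		= foofs_mount,
 *		.kill_sb	= kill_block_super,
 *		.fs_flags	= FS_REQUIRES_DEV,
 *	};
 */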

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_fput(sb->s_bdev_file);
	}
}
EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);
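
/*
 * Example (illustrative sketch): a RAM-backed filesystem with no backing
 * device pairs mount_nodev() with kill_anon_super(); names are
 * hypothetical.
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_nodev(fs_type, flags, data, foofs_fill_super);
 *	}
 *
 *	static struct file_system_type foofs_fs_type = {
 *		.name		= "foofs",
 *		.mount		= foofs_mount,
 *		.kill_sb	= kill_anon_super,
 *	};
 */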

int reconfigure_single(struct super_block *s,
		       int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}
	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);
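
/*
 * Example (illustrative sketch): a singleton filesystem where every mount
 * shares one superblock, in the style of debugfs; repeated mounts are
 * funneled through reconfigure_single() above. Names are hypothetical.
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *		int flags, const char *dev_name, void *data)
 *	{
 *		return mount_single(fs_type, flags, data, foofs_fill_super);
 *	}
 */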

/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root, returned %i\n",
		       fc->fs_type->name, error);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is
	 * the superblock structure contents that we just set up, not
	 * the SB_BORN flag.
	 */
	super_wake(sb, SB_BORN);

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);
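
/*
 * Caller-side sketch (assumed, condensed from the mount path): the VFS
 * builds a context, lets the filesystem produce its tree and then turns
 * fc->root into a mount; error handling is trimmed.
 *
 *	fc = fs_context_for_mount(type, sb_flags);
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	err = parse_monolithic_mount_data(fc, data);
 *	if (!err)
 *		err = vfs_get_tree(fc);
 *	if (!err)
 *		mnt = vfs_create_mount(fc);
 *	put_fs_context(fc);
 */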

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);
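
/*
 * Example (illustrative sketch): a network filesystem's fill_super
 * registering a per-connection BDI and then tuning it; "foofs" and
 * conn_id are hypothetical.
 *
 *	err = super_setup_bdi_name(sb, "foofs-%llu", conn_id);
 *	if (err)
 *		return err;
 *	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 */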

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					       sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
		 sb->s_writers.frozen != SB_UNFROZEN &&
		 sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}

#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST)

static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_HOLDER_KERNEL)
		++sb->s_writers.freeze_kcount;
	if (who & FREEZE_HOLDER_USERSPACE)
		++sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
		--sb->s_writers.freeze_kcount;
	if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
		--sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline bool may_freeze(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_HOLDER_KERNEL)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_kcount == 0;
	if (who & FREEZE_HOLDER_USERSPACE)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_ucount == 0;
	return false;
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem.  Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen.  The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * A filesystem may hold multiple devices and thus a filesystem may be
 * frozen through the block layer via multiple block devices. In this
 * case the request is marked as being allowed to nest by passing
 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
 * devices are unfrozen. If multiple freezes are attempted without
 * FREEZE_MAY_NEST, -EBUSY will be returned.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who)
{
	int ret;

	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while freezing!");
		return -EINVAL;
	}
	atomic_inc(&sb->s_active);

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (may_freeze(sb, who))
			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
		else
			ret = -EBUSY;
		/* All freezers share a single active reference. */
		deactivate_locked_super(sb);
		return ret;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	__super_lock_excl(sb);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);
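
/*
 * Example (illustrative sketch): an ioctl-style caller freezing on
 * behalf of userspace,
 *
 *	ret = freeze_super(sb, FREEZE_HOLDER_USERSPACE);
 *
 * while a freeze initiated through one of several block devices, which
 * may stack on top of other holders, would additionally pass
 * FREEZE_MAY_NEST.
 */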

/*
 * Undoes the effect of a freeze_super() call.  If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
{
	int error = -EINVAL;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
		goto out_unlock;

	/*
	 * All freezers share a single active reference.
	 * So just unlock in case there are any left.
	 */
	if (freeze_dec(sb, who))
		goto out_unlock;

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		wake_up_var(&sb->s_writers.frozen);
		goto out_deactivate;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			pr_err("VFS: Filesystem thaw failed\n");
			freeze_inc(sb, who);
			lockdep_sb_freeze_release(sb);
			goto out_unlock;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
	deactivate_locked_super(sb);
	return 0;

out_unlock:
	super_unlock_excl(sb);
	return error;
}

/**
 * thaw_super - unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
 *
 * A filesystem may hold multiple devices and thus a filesystem may
 * have been frozen through the block layer via multiple block devices.
 * The filesystem remains frozen until all block devices are unfrozen.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who)
{
	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while thawing!");
		return -EINVAL;
	}
	return thaw_super_locked(sb, who);
}
EXPORT_SYMBOL(thaw_super);
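
/*
 * Example (illustrative sketch): the counterpart of the freeze_super()
 * example above; the holder flags should match those used to freeze:
 *
 *	ret = thaw_super(sb, FREEZE_HOLDER_USERSPACE);
 */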

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating the workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
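
/*
 * Example (illustrative sketch): direct IO code is expected to create the
 * workqueue lazily, the first time a completion has to be deferred:
 *
 *	if (!sb->s_dio_done_wq) {
 *		ret = sb_init_dio_done_wq(sb);
 *		if (ret)
 *			return ret;
 *	}
 */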