// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

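/**
 * gfs2_ail_error - report an AIL inconsistency and schedule a delayed withdraw
 * @gl: the glock whose AIL list the buffer was found on
 * @bh: the offending buffer head
 */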
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_folio->mapping, bh->b_folio->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (gfs2_withdrawing(sdp))
		gfs2_withdraw(sdp);
}

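/**
 * gfs2_ail_empty_gl - write revokes for and empty a glock's AIL list
 * @gl: the glock
 *
 * Writes revokes for any buffers remaining on the glock's AIL list, then
 * flushes the log so that the revokes reach the journal.
 *
 * Returns: errno
 */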
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret = 0;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret) {
		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
		goto flush;
	}
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	if (!ret)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
				GFS2_LFC_AIL_EMPTY_GL);
	return ret;
}

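/**
 * gfs2_ail_flush - write revokes for a glock's AIL list and flush the log
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 */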
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawing_or_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start, end;

	if (!rgd)
		return;
	start = (rgd->rd_addr * bsize) & PAGE_MASK;
	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

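/**
 * gfs2_rgrp_go_dump - print information about a resource group
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */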
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

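/**
 * gfs2_glock2inode - safely dereference the inode behind a glock
 * @gl: The glock
 *
 * Takes gl_lockref.lock and sets GIF_GLOP_PENDING so that the inode cannot
 * go away before gfs2_clear_glop_pending() is called.
 */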
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

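/**
 * gfs2_glock2rgrp - safely dereference the resource group behind a glock
 * @gl: The glock
 */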
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

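/**
 * gfs2_clear_glop_pending - clear GIF_GLOP_PENDING and wake up any waiters
 * @ip: The inode (may be NULL)
 */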
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

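/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */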
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime, iatime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	iatime = inode_get_atime(inode);
	if (timespec64_compare(&iatime, &atime) < 0)
		inode_set_atime_to_ts(inode, atime);
	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
			be32_to_cpu(str->di_mtime_nsec));
	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
			be32_to_cpu(str->di_ctime_nsec));

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

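/**
 * inode_go_held - process a just-granted holder of an inode glock
 * @gh: the holder
 *
 * Waits for pending direct I/O and resumes an interrupted truncate when
 * necessary.
 *
 * Returns: errno
 */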
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	const struct inode *inode = &ip->i_inode;

	if (ip == NULL)
		return;

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(inode->i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode),
		  inode->i_data.nrpages);
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */

static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct super_block *sb = sdp->sd_vfs;

	if (!remote ||
	    (gl->gl_state != LM_ST_SHARED &&
	     gl->gl_state != LM_ST_UNLOCKED) ||
	    gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	/*
	 * Try to get an active super block reference to prevent racing with
	 * unmount (see super_trylock_shared()).  But note that unmount isn't
	 * the only place where a write lock on s_umount is taken, and we can
	 * fail here because of things like remount as well.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		atomic_inc(&sb->s_active);
		up_read(&sb->s_umount);
		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
			deactivate_super(sb);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}

/**
 * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
 * @gl: glock being unlocked
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be unlocked so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_unlocked(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	   live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here because this is called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
	.go_unlocked = inode_go_unlocked,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_callback = freeze_go_callback,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_flags = GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

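/* Table mapping each LM_TYPE_* value to its glock operations. */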
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};