Lines Matching +full:out +full:- +full:volume +full:- +full:limit

1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* vnode and volume validity verification.
19 * (2) On a RW volume, in response to certain vnode (inode)-accessing RPC
20 * calls, the server maintains a time-limited per-vnode promise that it
24 * Note that vnode-level callbacks may also be sent for other reasons,
27 * (3) On a RO (or Backup) volume, in response to certain vnode-accessing RPC
28 * calls, each server maintains a time-limited per-volume promise that it
29 * will send us a CB.CallBack request if the RO volume is updated to a
30 * snapshot of the RW volume ("vos release"). This is an atomic event
31 * that cuts over all instances of the RO volume across multiple servers
34 * Note that volume-level callbacks may also be sent for other reasons,
35 * such as the volumeserver taking over control of the volume from the
38 * Note also that each server maintains an independent time limit on an
41 * (4) Certain RPC calls include a volume information record "VolSync" in
42 * their reply. This contains a creation date for the volume that should
43 * remain unchanged for a RW volume (but will be changed if the volume is
45 * when a RO volume is released.
49 * ->cb_v_break. A counter of events that might mean that the contents of
50 * a volume have been altered since we last checked a vnode.
52 * ->cb_v_check. A counter of the number of events that we've sent a
56 * ->cb_scrub. A counter of the number of regression events for which we
59 * ->cb_ro_snapshot. A counter of the number of times that we've
60 * recognised that a RO volume has been updated.
62 * ->cb_break. A counter of events that might mean that the contents of a
65 * ->cb_expires_at. The time at which the callback promise expires or
70 * (1) When a volume-level CB.CallBack occurs, we increment ->cb_v_break on
71 * the volume and reset ->cb_expires_at (ie. set AFS_NO_CB_PROMISE) on the
72 * volume and volume's server record.
74 * (2) When a CB.InitCallBackState occurs, we treat this as a volume-level
75 * callback break on all the volumes that have been using that server
76 * (ie. increment ->cb_v_break and reset ->cb_expires_at).
78 * (3) When a vnode-level CB.CallBack occurs, we increment ->cb_break on the
79 * vnode and reset its ->cb_expires_at. If the vnode is mmapped, we also
84 * validity of a vnode. This first checks to see if ->cb_v_check and
85 * ->cb_v_break match, and if they don't, we lock volume->cb_check_lock
88 * After checking the volume, we check the vnode. If there's a mismatch
89 * between the volume counters and the vnode's mirrors of those counters,
90 * we lock vnode->validate_lock and issue an FS.FetchStatus on the vnode.
95 * (A) If the Creation timestamp has changed on a RW volume or regressed
96 * on a RO volume, we try to increment ->cb_scrub; if it advances on a
97 * RO volume, we assume "vos release" happened and try to increment
98 * ->cb_ro_snapshot.
101 * ->cb_scrub.
107 * volume->cb_v_check is then set to ->cb_v_break.
110 * parsed and used to set the promise in ->cb_expires_at for the vnode,
111 * the volume and the volume's server record.
113 * (7) If ->cb_scrub is seen to have advanced, we invalidate the pagecache for
118 * Check the validity of a vnode/inode and its parent volume.
122 const struct afs_volume *volume = vnode->volume; in afs_check_validity() local
125 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) in afs_check_validity()
128 if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) || in afs_check_validity()
129 atomic64_read(&vnode->cb_expires_at) <= deadline || in afs_check_validity()
130 volume->cb_expires_at <= deadline || in afs_check_validity()
131 vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot) || in afs_check_validity()
132 vnode->cb_scrub != atomic_read(&volume->cb_scrub) || in afs_check_validity()
133 test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { in afs_check_validity()
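The counter scheme laid out in the header comment, and the condition tested just above, boil down to "revalidate if any cached counter no longer matches the volume, or if a promise has lapsed". Below is a minimal editor-added sketch of that comparison, assuming simplified stand-in types; the struct layouts and names (demo_volume, demo_vnode, demo_vnode_needs_revalidation) are illustrative and are not the kernel's structures.

/* Illustrative only: simplified stand-ins for the counters described above. */
#include <stdbool.h>
#include <stdint.h>

struct demo_volume {
	unsigned int	cb_v_break;	/* volume-level break events seen so far */
	unsigned int	cb_v_check;	/* break events we have already handled */
	unsigned int	cb_ro_snapshot;	/* "vos release" events recognised */
	unsigned int	cb_scrub;	/* regression events forcing a full scrub */
	int64_t		cb_expires_at;	/* volume callback promise expiry time */
};

struct demo_vnode {
	const struct demo_volume *volume;
	unsigned int	cb_ro_snapshot;	/* volume counters as they were when ... */
	unsigned int	cb_scrub;	/* ... this vnode was last validated */
	int64_t		cb_expires_at;	/* per-vnode callback promise expiry */
};

/* Step (5): the vnode is suspect if any counter mismatches or a promise lapsed. */
static bool demo_vnode_needs_revalidation(const struct demo_vnode *vn, int64_t now)
{
	const struct demo_volume *vol = vn->volume;

	return vol->cb_v_check != vol->cb_v_break ||	/* unhandled volume-level break */
	       vn->cb_ro_snapshot != vol->cb_ro_snapshot ||
	       vn->cb_scrub != vol->cb_scrub ||
	       vol->cb_expires_at <= now ||		/* volume promise expired */
	       vn->cb_expires_at <= now;		/* vnode promise expired */
}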
144 static bool __afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume) in __afs_is_server_excluded() argument
153 slist = rcu_dereference(volume->servers); in __afs_is_server_excluded()
154 for (i = 0; i < slist->nr_servers; i++) { in __afs_is_server_excluded()
155 se = &slist->servers[i]; in __afs_is_server_excluded()
156 if (op->server == se->server) { in __afs_is_server_excluded()
157 is_excluded = test_bit(AFS_SE_EXCLUDED, &se->flags); in __afs_is_server_excluded()
167 * Update the volume's server list when the creation time changes and see if
170 static int afs_is_server_excluded(struct afs_operation *op, struct afs_volume *volume) in afs_is_server_excluded() argument
174 if (__afs_is_server_excluded(op, volume)) in afs_is_server_excluded()
177 set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); in afs_is_server_excluded()
178 ret = afs_check_volume_status(op->volume, op); in afs_is_server_excluded()
182 return __afs_is_server_excluded(op, volume); in afs_is_server_excluded()
186 * Handle a change to the volume creation time in the VolSync record.
188 static int afs_update_volume_creation_time(struct afs_operation *op, struct afs_volume *volume) in afs_update_volume_creation_time() argument
191 time64_t cur = volume->creation_time; in afs_update_volume_creation_time()
192 time64_t old = op->pre_volsync.creation; in afs_update_volume_creation_time()
193 time64_t new = op->volsync.creation; in afs_update_volume_creation_time()
196 _enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new); in afs_update_volume_creation_time()
199 volume->creation_time = new; in afs_update_volume_creation_time()
215 * our caches. For a RW vol, this will only change if the volume is in afs_update_volume_creation_time()
217 * the volume is updated to a new snapshot (eg. "vos release"). in afs_update_volume_creation_time()
219 if (volume->type == AFSVL_RWVOL) in afs_update_volume_creation_time()
221 if (volume->type == AFSVL_BACKVOL) { in afs_update_volume_creation_time()
227 /* We have an RO volume, we need to query the VL server and look at the in afs_update_volume_creation_time()
228 * server flags to see if RW->RO replication is in progress. in afs_update_volume_creation_time()
230 ret = afs_is_server_excluded(op, volume); in afs_update_volume_creation_time()
234 snap = atomic_read(&volume->cb_ro_snapshot); in afs_update_volume_creation_time()
235 trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_volume_excluded); in afs_update_volume_creation_time()
240 snap = atomic_inc_return(&volume->cb_ro_snapshot); in afs_update_volume_creation_time()
241 trace_afs_cb_v_break(volume->vid, snap, afs_cb_break_for_vos_release); in afs_update_volume_creation_time()
242 volume->creation_time = new; in afs_update_volume_creation_time()
246 atomic_inc(&volume->cb_scrub); in afs_update_volume_creation_time()
247 trace_afs_cb_v_break(volume->vid, 0, afs_cb_break_for_creation_regress); in afs_update_volume_creation_time()
248 volume->creation_time = new; in afs_update_volume_creation_time()
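As an editor-added aside, rule (A) from the header comment, which the fragment above implements, can be condensed as below. The enum and function names are illustrative assumptions, and the Backup-volume and excluded-server paths visible in the listing are deliberately left out of the sketch.

/* Illustrative only: condensed creation-time policy from rule (A). */
#include <stdint.h>

enum demo_vol_type   { DEMO_RW, DEMO_RO };
enum demo_vol_action { DEMO_NOTHING, DEMO_SCRUB, DEMO_RO_SNAPSHOT };

static enum demo_vol_action demo_creation_changed(enum demo_vol_type type,
						  int64_t old_creation,
						  int64_t new_creation)
{
	if (new_creation == old_creation)
		return DEMO_NOTHING;		/* nothing to do */
	if (type == DEMO_RW || new_creation < old_creation)
		return DEMO_SCRUB;		/* restore/migration or regression */
	return DEMO_RO_SNAPSHOT;		/* RO creation advanced: assume "vos release" */
}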
253 * Handle a change to the volume update time in the VolSync record.
255 static void afs_update_volume_update_time(struct afs_operation *op, struct afs_volume *volume) in afs_update_volume_update_time() argument
258 time64_t cur = volume->update_time; in afs_update_volume_update_time()
259 time64_t old = op->pre_volsync.update; in afs_update_volume_update_time()
260 time64_t new = op->volsync.update; in afs_update_volume_update_time()
262 _enter("%llx,%llx,%llx->%llx", volume->vid, cur, old, new); in afs_update_volume_update_time()
265 volume->update_time = new; in afs_update_volume_update_time()
272 /* If the volume update time changes in an unexpected way, we need to in afs_update_volume_update_time()
275 * volume is updated to a new snapshot (eg. "vos release"). in afs_update_volume_update_time()
287 atomic_inc(&volume->cb_scrub); in afs_update_volume_update_time()
288 trace_afs_cb_v_break(volume->vid, 0, reason); in afs_update_volume_update_time()
290 volume->update_time = new; in afs_update_volume_update_time()
294 static int afs_update_volume_times(struct afs_operation *op, struct afs_volume *volume) in afs_update_volume_times() argument
298 if (likely(op->volsync.creation == volume->creation_time && in afs_update_volume_times()
299 op->volsync.update == volume->update_time)) in afs_update_volume_times()
302 mutex_lock(&volume->volsync_lock); in afs_update_volume_times()
303 if (op->volsync.creation != volume->creation_time) { in afs_update_volume_times()
304 ret = afs_update_volume_creation_time(op, volume); in afs_update_volume_times()
306 goto out; in afs_update_volume_times()
308 if (op->volsync.update != volume->update_time) in afs_update_volume_times()
309 afs_update_volume_update_time(op, volume); in afs_update_volume_times()
310 out: in afs_update_volume_times()
311 mutex_unlock(&volume->volsync_lock); in afs_update_volume_times()
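An editor-added sketch of the locking shape used by afs_update_volume_times() above: compare the reply's VolSync times against the cached ones without the lock, and only take the mutex (re-testing underneath it) when something differs. The pthread-based types and names here are assumptions for illustration; the kernel code uses volume->volsync_lock.

/* Illustrative only: unlocked fast path, then re-check under the lock. */
#include <pthread.h>
#include <stdint.h>

struct demo_volsync_cache {
	pthread_mutex_t	lock;
	int64_t		creation;
	int64_t		update;
};

static void demo_update_times(struct demo_volsync_cache *c,
			      int64_t creation, int64_t update)
{
	/* Fast path: nothing changed, avoid taking the mutex at all. */
	if (creation == c->creation && update == c->update)
		return;

	pthread_mutex_lock(&c->lock);
	/* Re-test under the lock: another thread may already have caught up. */
	if (creation != c->creation)
		c->creation = creation;	/* stands in for the creation-time policy */
	if (update != c->update)
		c->update = update;	/* stands in for the update-time policy */
	pthread_mutex_unlock(&c->lock);
}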
316 * Update the state of a volume, including recording the expiration time of the
321 struct afs_server_list *slist = op->server_list; in afs_update_volume_state()
322 struct afs_server_entry *se = &slist->servers[op->server_index]; in afs_update_volume_state()
323 struct afs_callback *cb = &op->file[0].scb.callback; in afs_update_volume_state()
324 struct afs_volume *volume = op->volume; in afs_update_volume_state() local
325 unsigned int cb_v_break = atomic_read(&volume->cb_v_break); in afs_update_volume_state()
326 unsigned int cb_v_check = atomic_read(&volume->cb_v_check); in afs_update_volume_state()
329 _enter("%llx", op->volume->vid); in afs_update_volume_state()
331 if (op->volsync.creation != TIME64_MIN || op->volsync.update != TIME64_MIN) { in afs_update_volume_state()
332 ret = afs_update_volume_times(op, volume); in afs_update_volume_state()
339 if (op->cb_v_break == cb_v_break && in afs_update_volume_state()
340 (op->file[0].scb.have_cb || op->file[1].scb.have_cb)) { in afs_update_volume_state()
341 time64_t expires_at = cb->expires_at; in afs_update_volume_state()
343 if (!op->file[0].scb.have_cb) in afs_update_volume_state()
344 expires_at = op->file[1].scb.callback.expires_at; in afs_update_volume_state()
346 se->cb_expires_at = expires_at; in afs_update_volume_state()
347 volume->cb_expires_at = expires_at; in afs_update_volume_state()
349 if (cb_v_check < op->cb_v_break) in afs_update_volume_state()
350 atomic_cmpxchg(&volume->cb_v_check, cb_v_check, op->cb_v_break); in afs_update_volume_state()
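The compare-and-exchange above only advances cb_v_check to the break count sampled before the RPC was issued, so a break arriving while the call was in flight is never marked as handled. An editor-added sketch of that pattern using C11 atomics (names are illustrative; the kernel uses atomic_t and atomic_cmpxchg()):

/* Illustrative only: advance the "handled" counter without losing new breaks. */
#include <stdatomic.h>

static void demo_mark_breaks_handled(atomic_uint *cb_v_check,
				     unsigned int break_seen_before_rpc)
{
	unsigned int cur = atomic_load(cb_v_check);

	/* If a newer break was already accounted for, or arrives concurrently,
	 * the compare-and-swap simply fails and the volume stays suspect.
	 */
	if (cur < break_seen_before_rpc)
		atomic_compare_exchange_strong(cb_v_check, &cur,
					       break_seen_before_rpc);
}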
356 * - might also want to ditch all the outstanding writes and dirty pages
360 _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode); in afs_zap_data()
364 /* nuke all the non-dirty pages that aren't locked, mapped or being in afs_zap_data()
367 if (S_ISREG(vnode->netfs.inode.i_mode)) in afs_zap_data()
368 filemap_invalidate_inode(&vnode->netfs.inode, true, 0, LLONG_MAX); in afs_zap_data()
370 filemap_invalidate_inode(&vnode->netfs.inode, false, 0, LLONG_MAX); in afs_zap_data()
375 * - there are several things we need to check
376 * - parent dir data changes (rm, rmdir, rename, mkdir, create, link,
378 * - parent dir metadata changed (security changes)
379 * - dentry data changed (write, truncate)
380 * - dentry metadata changed (security changes)
384 struct afs_volume *volume = vnode->volume; in afs_validate() local
391 vnode->fid.vid, vnode->fid.vnode, vnode->flags, in afs_validate()
395 return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0; in afs_validate()
397 ret = down_write_killable(&vnode->validate_lock); in afs_validate()
401 if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { in afs_validate()
402 ret = -ESTALE; in afs_validate()
406 /* Validate a volume after the v_break has changed or the volume in afs_validate()
407 * callback expired. We only want to do this once per volume per in afs_validate()
411 if (volume->cb_expires_at <= deadline || in afs_validate()
412 atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) { in afs_validate()
413 ret = mutex_lock_interruptible(&volume->cb_check_lock); in afs_validate()
419 cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot); in afs_validate()
420 cb_scrub = atomic_read(&volume->cb_scrub); in afs_validate()
421 if (vnode->cb_ro_snapshot != cb_ro_snapshot || in afs_validate()
422 vnode->cb_scrub != cb_scrub) in afs_validate()
423 unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false); in afs_validate()
425 if (vnode->cb_ro_snapshot != cb_ro_snapshot || in afs_validate()
426 vnode->cb_scrub != cb_scrub || in afs_validate()
427 volume->cb_expires_at <= deadline || in afs_validate()
428 atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) || in afs_validate()
429 atomic64_read(&vnode->cb_expires_at) <= deadline in afs_validate()
433 if (ret == -ENOENT) { in afs_validate()
434 set_bit(AFS_VNODE_DELETED, &vnode->flags); in afs_validate()
435 ret = -ESTALE; in afs_validate()
440 _debug("new promise [fl=%lx]", vnode->flags); in afs_validate()
443 /* We can drop the volume lock now. */ in afs_validate()
445 mutex_unlock(&volume->cb_check_lock); in afs_validate()
449 cb_ro_snapshot = atomic_read(&volume->cb_ro_snapshot); in afs_validate()
450 cb_scrub = atomic_read(&volume->cb_scrub); in afs_validate()
452 vnode->cb_ro_snapshot, cb_ro_snapshot, in afs_validate()
453 vnode->cb_scrub, cb_scrub); in afs_validate()
454 if (vnode->cb_scrub != cb_scrub) in afs_validate()
456 vnode->cb_ro_snapshot = cb_ro_snapshot; in afs_validate()
457 vnode->cb_scrub = cb_scrub; in afs_validate()
461 zap |= test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); in afs_validate()
464 up_write(&vnode->validate_lock); in afs_validate()
470 mutex_unlock(&volume->cb_check_lock); in afs_validate()
471 up_write(&vnode->validate_lock); in afs_validate()