Lines Matching +full:lock

1 // SPDX-License-Identifier: GPL-2.0-or-later
35 struct dlm_lock *lock);
36 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
39 * lock level will obsolete a pending bast.
40 * For example, if dlm_thread queued a bast for an EX lock that
42 * lock owner downconverted to NL, the bast is now obsolete.
44 * This is needed because the lock and convert paths can queue
45 * asts out-of-band (not waiting for dlm_thread) in order to
47 static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in dlm_should_cancel_bast() argument
49 assert_spin_locked(&dlm->ast_lock); in dlm_should_cancel_bast()
50 assert_spin_locked(&lock->spinlock); in dlm_should_cancel_bast()
52 if (lock->ml.highest_blocked == LKM_IVMODE) in dlm_should_cancel_bast()
54 BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); in dlm_should_cancel_bast()
56 if (lock->bast_pending && in dlm_should_cancel_bast()
57 list_empty(&lock->bast_list)) in dlm_should_cancel_bast()
61 if (lock->ml.type == LKM_EXMODE) in dlm_should_cancel_bast()
64 else if (lock->ml.type == LKM_NLMODE) in dlm_should_cancel_bast()
67 else if (lock->ml.highest_blocked != LKM_EXMODE) in dlm_should_cancel_bast()
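The cancellation test above keys off the holder's current granted mode versus the highest mode recorded as blocked when the bast was queued: an EX holder still blocks everything, an NL holder blocks nothing, and a PR holder only still blocks an EX request. Below is a minimal userspace model of that decision in the OCFS2 o2dlm ast path; it is not kernel code, and the enum and function names are illustrative only.

/* Userspace model of the bast-cancellation test above (illustrative
 * names, no kernel types or locking). */
#include <stdio.h>

enum mode { NL_MODE, PR_MODE, EX_MODE };

/* Return 1 if a bast queued while "highest_blocked" was waiting is now
 * obsolete given the holder's current granted mode. */
static int bast_obsolete(enum mode granted, enum mode highest_blocked)
{
	if (granted == EX_MODE)
		return 0;	/* EX still blocks every other mode */
	if (granted == NL_MODE)
		return 1;	/* NL blocks nothing */
	/* PR only still blocks an EX request */
	return highest_blocked != EX_MODE;
}

int main(void)
{
	/* holder downconverted EX -> NL while a PR waiter was queued */
	printf("obsolete: %d\n", bast_obsolete(NL_MODE, PR_MODE));	/* 1 */
	/* PR holder with an EX waiter: the bast is still needed */
	printf("obsolete: %d\n", bast_obsolete(PR_MODE, EX_MODE));	/* 0 */
	return 0;
}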
74 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in __dlm_queue_ast() argument
79 BUG_ON(!lock); in __dlm_queue_ast()
81 res = lock->lockres; in __dlm_queue_ast()
83 assert_spin_locked(&dlm->ast_lock); in __dlm_queue_ast()
85 if (!list_empty(&lock->ast_list)) { in __dlm_queue_ast()
86 mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, " in __dlm_queue_ast()
88 dlm->name, res->lockname.len, res->lockname.name, in __dlm_queue_ast()
89 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in __dlm_queue_ast()
90 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), in __dlm_queue_ast()
91 lock->ast_pending, lock->ml.type); in __dlm_queue_ast()
94 if (lock->ast_pending) in __dlm_queue_ast()
95 mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n", in __dlm_queue_ast()
96 dlm->name, res->lockname.len, res->lockname.name, in __dlm_queue_ast()
97 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in __dlm_queue_ast()
98 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); in __dlm_queue_ast()
100 /* putting lock on list, add a ref */ in __dlm_queue_ast()
101 dlm_lock_get(lock); in __dlm_queue_ast()
102 spin_lock(&lock->spinlock); in __dlm_queue_ast()
105 if (dlm_should_cancel_bast(dlm, lock)) { in __dlm_queue_ast()
106 mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n", in __dlm_queue_ast()
107 dlm->name, res->lockname.len, res->lockname.name, in __dlm_queue_ast()
108 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in __dlm_queue_ast()
109 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); in __dlm_queue_ast()
110 lock->bast_pending = 0; in __dlm_queue_ast()
111 list_del_init(&lock->bast_list); in __dlm_queue_ast()
112 lock->ml.highest_blocked = LKM_IVMODE; in __dlm_queue_ast()
113 /* removing lock from list, remove a ref. guaranteed in __dlm_queue_ast()
115 * so res->spinlock will not be taken here */ in __dlm_queue_ast()
116 dlm_lock_put(lock); in __dlm_queue_ast()
120 * to get to this point. the res->spinlock will not be in __dlm_queue_ast()
124 list_add_tail(&lock->ast_list, &dlm->pending_asts); in __dlm_queue_ast()
125 lock->ast_pending = 1; in __dlm_queue_ast()
126 spin_unlock(&lock->spinlock); in __dlm_queue_ast()
129 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in dlm_queue_ast() argument
132 BUG_ON(!lock); in dlm_queue_ast()
134 spin_lock(&dlm->ast_lock); in dlm_queue_ast()
135 __dlm_queue_ast(dlm, lock); in dlm_queue_ast()
136 spin_unlock(&dlm->ast_lock); in dlm_queue_ast()
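The mlog() calls in the functions above print each lock as node:sequence by decoding lock->ml.cookie through dlm_get_lock_cookie_node() and dlm_get_lock_cookie_seq(). The sketch below models that split under one assumption (not shown in the listing): the 64-bit host-order cookie carries the owning node number in its top 8 bits and a per-node sequence number in the low 56 bits.

/* Userspace sketch of the cookie split printed by the mlog() calls
 * above.  Assumption: node in the top 8 bits, sequence in the low
 * 56 bits of the host-order cookie. */
#include <stdio.h>
#include <stdint.h>

static unsigned int cookie_node(uint64_t cookie)
{
	return (unsigned int)(cookie >> 56);
}

static unsigned long long cookie_seq(uint64_t cookie)
{
	return cookie & ((1ULL << 56) - 1);
}

int main(void)
{
	uint64_t cookie = ((uint64_t)3 << 56) | 42;	/* node 3, seq 42 */

	printf("lock %u:%llu\n", cookie_node(cookie), cookie_seq(cookie));
	return 0;
}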
140 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) in __dlm_queue_bast() argument
145 BUG_ON(!lock); in __dlm_queue_bast()
147 assert_spin_locked(&dlm->ast_lock); in __dlm_queue_bast()
149 res = lock->lockres; in __dlm_queue_bast()
151 BUG_ON(!list_empty(&lock->bast_list)); in __dlm_queue_bast()
152 if (lock->bast_pending) in __dlm_queue_bast()
153 mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n", in __dlm_queue_bast()
154 dlm->name, res->lockname.len, res->lockname.name, in __dlm_queue_bast()
155 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in __dlm_queue_bast()
156 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); in __dlm_queue_bast()
158 /* putting lock on list, add a ref */ in __dlm_queue_bast()
159 dlm_lock_get(lock); in __dlm_queue_bast()
160 spin_lock(&lock->spinlock); in __dlm_queue_bast()
161 list_add_tail(&lock->bast_list, &dlm->pending_basts); in __dlm_queue_bast()
162 lock->bast_pending = 1; in __dlm_queue_bast()
163 spin_unlock(&lock->spinlock); in __dlm_queue_bast()
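Both queue paths follow the same discipline: take a reference on the lock before putting it on the dlm-wide pending list, and set *_pending so a later flush knows a callback is in flight; the reference is dropped when the entry leaves the list. A minimal userspace model of that "reference held while on the list" pattern follows; the names and the singly linked list are illustrative, and there is no real locking here.

/* Userspace model of the "hold a reference while the lock sits on the
 * pending list" pattern used by __dlm_queue_ast()/__dlm_queue_bast().
 * The kernel appends to the tail of a kernel list_head; this model
 * pushes to the head, which does not matter for the refcount point. */
#include <stdio.h>

struct fake_lock {
	int refcount;
	int ast_pending;
	struct fake_lock *next;		/* singly linked pending list */
};

static struct fake_lock *pending_asts;

static void lock_get(struct fake_lock *l) { l->refcount++; }
static void lock_put(struct fake_lock *l) { l->refcount--; }

static void queue_ast(struct fake_lock *l)
{
	lock_get(l);			/* ref pinned while on the list */
	l->next = pending_asts;
	pending_asts = l;
	l->ast_pending = 1;
}

static void flush_asts(void)
{
	while (pending_asts) {
		struct fake_lock *l = pending_asts;

		pending_asts = l->next;
		l->ast_pending = 0;
		/* ...deliver the ast here... */
		lock_put(l);		/* drop the ref taken at queue time */
	}
}

int main(void)
{
	struct fake_lock l = { .refcount = 1 };

	queue_ast(&l);
	printf("queued:  ref=%d pending=%d\n", l.refcount, l.ast_pending);
	flush_asts();
	printf("flushed: ref=%d pending=%d\n", l.refcount, l.ast_pending);
	return 0;
}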
167 struct dlm_lock *lock) in dlm_update_lvb() argument
169 struct dlm_lockstatus *lksb = lock->lksb; in dlm_update_lvb()
173 spin_lock(&res->spinlock); in dlm_update_lvb()
174 if (res->owner == dlm->node_num) { in dlm_update_lvb()
176 if (lksb->flags & DLM_LKSB_GET_LVB) { in dlm_update_lvb()
178 lock->ml.node == dlm->node_num ? "master" : in dlm_update_lvb()
180 memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); in dlm_update_lvb()
182 /* Do nothing for lvb put requests - they should be done in in dlm_update_lvb()
183 * place when the lock is downconverted - otherwise we risk in dlm_update_lvb()
190 spin_unlock(&res->spinlock); in dlm_update_lvb()
193 lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); in dlm_update_lvb()
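As the matched lines show, dlm_update_lvb() only services "get lvb" requests, and only on the node that masters the resource: the resource's value block is copied into the caller's lksb, "put" requests are left to the downconvert path, and both GET/PUT flags are cleared afterwards in either case. A simplified userspace model of that flag handling is below; the flag values and struct layout are stand-ins, not the kernel's definitions.

/* Simplified userspace model of the DLM_LKSB_GET_LVB handling in
 * dlm_update_lvb().  Flag values and struct layout are stand-ins. */
#include <stdio.h>
#include <string.h>

#define LVB_LEN      64
#define LKSB_GET_LVB 0x1
#define LKSB_PUT_LVB 0x2

struct fake_lksb {
	int  flags;
	char lvb[LVB_LEN];
};

static void update_lvb(struct fake_lksb *lksb, const char *res_lvb,
		       int this_node_is_master)
{
	if (this_node_is_master && (lksb->flags & LKSB_GET_LVB))
		memcpy(lksb->lvb, res_lvb, LVB_LEN);
	/* "put" requests are handled at downconvert time, not here */
	lksb->flags &= ~(LKSB_GET_LVB | LKSB_PUT_LVB);
}

int main(void)
{
	char res_lvb[LVB_LEN] = "resource value block";
	struct fake_lksb lksb = { .flags = LKSB_GET_LVB };

	update_lvb(&lksb, res_lvb, 1);
	printf("lvb=\"%s\" flags=%#x\n", lksb.lvb, lksb.flags);
	return 0;
}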
197 struct dlm_lock *lock) in dlm_do_local_ast() argument
201 mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name, in dlm_do_local_ast()
202 res->lockname.len, res->lockname.name, in dlm_do_local_ast()
203 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in dlm_do_local_ast()
204 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); in dlm_do_local_ast()
206 fn = lock->ast; in dlm_do_local_ast()
207 BUG_ON(lock->ml.node != dlm->node_num); in dlm_do_local_ast()
209 dlm_update_lvb(dlm, res, lock); in dlm_do_local_ast()
210 (*fn)(lock->astdata); in dlm_do_local_ast()
215 struct dlm_lock *lock) in dlm_do_remote_ast() argument
221 mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name, in dlm_do_remote_ast()
222 res->lockname.len, res->lockname.name, in dlm_do_remote_ast()
223 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in dlm_do_remote_ast()
224 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); in dlm_do_remote_ast()
226 lksb = lock->lksb; in dlm_do_remote_ast()
227 BUG_ON(lock->ml.node == dlm->node_num); in dlm_do_remote_ast()
229 lksbflags = lksb->flags; in dlm_do_remote_ast()
230 dlm_update_lvb(dlm, res, lock); in dlm_do_remote_ast()
232 /* lock request came from another node in dlm_do_remote_ast()
234 ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); in dlm_do_remote_ast()
239 struct dlm_lock *lock, int blocked_type) in dlm_do_local_bast() argument
241 dlm_bastlockfunc_t *fn = lock->bast; in dlm_do_local_bast()
243 BUG_ON(lock->ml.node != dlm->node_num); in dlm_do_local_bast()
245 mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n", in dlm_do_local_bast()
246 dlm->name, res->lockname.len, res->lockname.name, in dlm_do_local_bast()
247 dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), in dlm_do_local_bast()
248 dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), in dlm_do_local_bast()
251 (*fn)(lock->astdata, blocked_type); in dlm_do_local_bast()
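The local delivery paths simply call back into the layer that created the lock: the ast function receives only the opaque astdata, while the bast function also receives the mode that is blocked. The self-contained sketch below mirrors the callback shapes visible at the call sites above ((*fn)(astdata) and (*fn)(astdata, blocked_type)); the typedef names and surrounding structure are illustrative, not the kernel's.

/* Sketch of the callback shapes invoked by dlm_do_local_ast() and
 * dlm_do_local_bast().  Signatures follow the call sites above;
 * everything else here is illustrative. */
#include <stdio.h>

typedef void (ast_func_t)(void *astdata);
typedef void (bast_func_t)(void *astdata, int blocked_type);

struct fake_lock {
	ast_func_t  *ast;
	bast_func_t *bast;
	void        *astdata;
};

static void my_ast(void *data)
{
	printf("granted: %s\n", (const char *)data);
}

static void my_bast(void *data, int blocked_type)
{
	printf("please release %s, mode %d is waiting\n",
	       (const char *)data, blocked_type);
}

int main(void)
{
	struct fake_lock lock = {
		.ast = my_ast, .bast = my_bast, .astdata = "myfile.lock",
	};

	(*lock.ast)(lock.astdata);		/* as in dlm_do_local_ast() */
	(*lock.bast)(lock.astdata, 2);		/* as in dlm_do_local_bast() */
	return 0;
}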
263 struct dlm_lock *lock = NULL; in dlm_proxy_ast_handler() local
264 struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; in dlm_proxy_ast_handler()
277 "Domain %s not fully joined!\n", dlm->name); in dlm_proxy_ast_handler()
279 name = past->name; in dlm_proxy_ast_handler()
280 locklen = past->namelen; in dlm_proxy_ast_handler()
281 cookie = past->cookie; in dlm_proxy_ast_handler()
282 flags = be32_to_cpu(past->flags); in dlm_proxy_ast_handler()
283 node = past->node_idx; in dlm_proxy_ast_handler()
303 mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type); in dlm_proxy_ast_handler()
305 if (past->type != DLM_AST && in dlm_proxy_ast_handler()
306 past->type != DLM_BAST) { in dlm_proxy_ast_handler()
308 "name=%.*s, node=%u\n", past->type, in dlm_proxy_ast_handler()
319 "name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"), in dlm_proxy_ast_handler()
328 BUG_ON(res->owner == dlm->node_num); in dlm_proxy_ast_handler()
330 mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len, in dlm_proxy_ast_handler()
331 res->lockname.name); in dlm_proxy_ast_handler()
333 spin_lock(&res->spinlock); in dlm_proxy_ast_handler()
334 if (res->state & DLM_LOCK_RES_RECOVERING) { in dlm_proxy_ast_handler()
339 if (res->state & DLM_LOCK_RES_MIGRATING) { in dlm_proxy_ast_handler()
345 head = &res->converting; in dlm_proxy_ast_handler()
346 lock = NULL; in dlm_proxy_ast_handler()
347 list_for_each_entry(lock, head, list) { in dlm_proxy_ast_handler()
348 if (lock->ml.cookie == cookie) in dlm_proxy_ast_handler()
353 if (past->type == DLM_AST) in dlm_proxy_ast_handler()
354 head = &res->blocked; in dlm_proxy_ast_handler()
356 head = &res->granted; in dlm_proxy_ast_handler()
358 list_for_each_entry(lock, head, list) { in dlm_proxy_ast_handler()
359 /* if lock is found but unlock is pending ignore the bast */ in dlm_proxy_ast_handler()
360 if (lock->ml.cookie == cookie) { in dlm_proxy_ast_handler()
361 if (lock->unlock_pending) in dlm_proxy_ast_handler()
367 mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, " in dlm_proxy_ast_handler()
368 "node=%u\n", past->type == DLM_AST ? "" : "b", in dlm_proxy_ast_handler()
375 spin_unlock(&res->spinlock); in dlm_proxy_ast_handler()
380 if (past->type == DLM_AST) { in dlm_proxy_ast_handler()
381 /* do not alter lock refcount. switching lists. */ in dlm_proxy_ast_handler()
382 list_move_tail(&lock->list, &res->granted); in dlm_proxy_ast_handler()
383 mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n", in dlm_proxy_ast_handler()
384 dlm->name, res->lockname.len, res->lockname.name, in dlm_proxy_ast_handler()
387 lock->ml.type, lock->ml.convert_type); in dlm_proxy_ast_handler()
389 if (lock->ml.convert_type != LKM_IVMODE) { in dlm_proxy_ast_handler()
390 lock->ml.type = lock->ml.convert_type; in dlm_proxy_ast_handler()
391 lock->ml.convert_type = LKM_IVMODE; in dlm_proxy_ast_handler()
396 lock->lksb->status = DLM_NORMAL; in dlm_proxy_ast_handler()
400 BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB)); in dlm_proxy_ast_handler()
401 memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN); in dlm_proxy_ast_handler()
404 spin_unlock(&res->spinlock); in dlm_proxy_ast_handler()
406 if (past->type == DLM_AST) in dlm_proxy_ast_handler()
407 dlm_do_local_ast(dlm, res, lock); in dlm_proxy_ast_handler()
409 dlm_do_local_bast(dlm, res, lock, past->blocked_type); in dlm_proxy_ast_handler()
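To match an incoming proxy message to a lock, the handler above searches the resource's queues by cookie: the converting queue first, then the blocked queue for an AST or the granted queue for a BAST, and a BAST is ignored if an unlock is already pending on the lock it finds. A userspace sketch of that two-stage lookup follows, with plain arrays standing in for the kernel list heads.

/* Userspace sketch of the cookie lookup in dlm_proxy_ast_handler():
 * search converting first, then blocked (AST) or granted (BAST).
 * Arrays stand in for the kernel's list_head queues. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fake_lock {
	uint64_t cookie;
	int unlock_pending;
};

static struct fake_lock *find_cookie(struct fake_lock *q, size_t n,
				     uint64_t cookie)
{
	for (size_t i = 0; i < n; i++)
		if (q[i].cookie == cookie)
			return &q[i];
	return NULL;
}

int main(void)
{
	struct fake_lock converting[] = { { .cookie = 7 } };
	struct fake_lock blocked[]    = { { .cookie = 9 } };
	struct fake_lock granted[]    = { { .cookie = 5, .unlock_pending = 1 } };
	int is_ast = 0;				/* pretend this is a BAST */
	uint64_t cookie = 5;
	struct fake_lock *lock;

	lock = find_cookie(converting, 1, cookie);
	if (!lock)
		lock = is_ast ? find_cookie(blocked, 1, cookie)
			      : find_cookie(granted, 1, cookie);

	if (!lock)
		printf("unknown lock, drop the message\n");
	else if (!is_ast && lock->unlock_pending)
		printf("bast for a lock being unlocked, ignore it\n");
	else
		printf("deliver for cookie %llu\n",
		       (unsigned long long)lock->cookie);
	return 0;
}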
422 struct dlm_lock *lock, int msg_type, in dlm_send_proxy_ast_msg() argument
431 mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name, in dlm_send_proxy_ast_msg()
432 res->lockname.len, res->lockname.name, lock->ml.node, msg_type, in dlm_send_proxy_ast_msg()
436 past.node_idx = dlm->node_num; in dlm_send_proxy_ast_msg()
439 past.namelen = res->lockname.len; in dlm_send_proxy_ast_msg()
440 memcpy(past.name, res->lockname.name, past.namelen); in dlm_send_proxy_ast_msg()
441 past.cookie = lock->ml.cookie; in dlm_send_proxy_ast_msg()
448 vec[1].iov_base = lock->lksb->lvb; in dlm_send_proxy_ast_msg()
452 ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, in dlm_send_proxy_ast_msg()
453 lock->ml.node, &status); in dlm_send_proxy_ast_msg()
456 dlm->name, res->lockname.len, res->lockname.name, ret, in dlm_send_proxy_ast_msg()
457 lock->ml.node); in dlm_send_proxy_ast_msg()
461 "node is dead!\n", lock->ml.node); in dlm_send_proxy_ast_msg()
465 "DLM_MIGRATING!\n", lock->ml.node); in dlm_send_proxy_ast_msg()
469 lock->ml.node, status); in dlm_send_proxy_ast_msg()
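dlm_send_proxy_ast_msg() builds a one- or two-segment vectored message: segment 0 carries the fixed dlm_proxy_ast header, and a second segment carrying the LVB is appended only when the lksb flags ask for it. The sketch below models that optional-second-iovec pattern with writev() to stdout in place of o2net_send_message_vec(); the struct layout, flag value, and LVB length are placeholders.

/* Sketch of the optional-LVB iovec built by dlm_send_proxy_ast_msg(),
 * using writev() instead of the o2net transport.  All names, sizes,
 * and flag values here are placeholders. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define FAKE_LVB_LEN 64
#define FLG_GET_LVB  0x1

struct fake_past {
	unsigned char node_idx;
	unsigned char type;		/* AST or BAST */
	unsigned char namelen;
	char name[32];
};

int main(void)
{
	struct fake_past past = { .node_idx = 1, .type = 0, .namelen = 5 };
	char lvb[FAKE_LVB_LEN] = "value block contents";
	unsigned int flags = FLG_GET_LVB;
	struct iovec vec[2] = {
		{ .iov_base = &past, .iov_len = sizeof(past) },
	};
	int veclen = 1;

	memcpy(past.name, "myres", past.namelen);
	if (flags & FLG_GET_LVB) {
		/* second segment only when an LVB travels with the ast */
		vec[1].iov_base = lvb;
		vec[1].iov_len = FAKE_LVB_LEN;
		veclen = 2;
	}
	if (writev(STDOUT_FILENO, vec, veclen) < 0)
		perror("writev");
	return 0;
}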