// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dlmast.c
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include "../cluster/heartbeat.h"
#include "../cluster/nodemanager.h"
#include "../cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "../cluster/masklog.h"
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                           struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        assert_spin_locked(&dlm->ast_lock);
        assert_spin_locked(&lock->spinlock);

        if (lock->ml.highest_blocked == LKM_IVMODE)
                return 0;
        BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

        if (lock->bast_pending &&
            list_empty(&lock->bast_list))
                /* old bast already sent, ok */
                return 0;

        if (lock->ml.type == LKM_EXMODE)
                /* EX blocks anything left, any bast still valid */
                return 0;
        else if (lock->ml.type == LKM_NLMODE)
                /* NL blocks nothing, no reason to send any bast, cancel it */
                return 1;
        else if (lock->ml.highest_blocked != LKM_EXMODE)
                /* PR only blocks EX */
                return 1;

        return 0;
}
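
/*
 * Worked example of the logic above (illustrative only): suppose dlm_thread
 * queued a bast because an EX lock here was blocking a remote EX request
 * (highest_blocked == LKM_EXMODE).  If the holder then downconverts to PR,
 * the bast is still needed since PR keeps blocking EX, so this returns 0.
 * If the holder downconverts all the way to NL, nothing is blocked anymore
 * and the pending bast is cancelled (returns 1).
 */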
void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        struct dlm_lock_resource *res;

        BUG_ON(!dlm);
        BUG_ON(!lock);

        res = lock->lockres;

        assert_spin_locked(&dlm->ast_lock);

        if (!list_empty(&lock->ast_list)) {
                mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
                     "AST list not empty, pending %d, newlevel %d\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
                     lock->ast_pending, lock->ml.type);
                BUG();
        }
        if (lock->ast_pending)
                mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

        /* putting lock on list, add a ref */
        dlm_lock_get(lock);
        spin_lock(&lock->spinlock);

        /* check to see if this ast obsoletes the bast */
        if (dlm_should_cancel_bast(dlm, lock)) {
                mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
                lock->bast_pending = 0;
                list_del_init(&lock->bast_list);
                lock->ml.highest_blocked = LKM_IVMODE;
                /* removing lock from list, remove a ref.  guaranteed
                 * this won't be the last ref because of the get above,
                 * so res->spinlock will not be taken here */
                dlm_lock_put(lock);
                /* free up the reserved bast that we are cancelling.
                 * guaranteed that this will not be the last reserved
                 * ast because *both* an ast and a bast were reserved
                 * to get to this point.  the res->spinlock will not be
                 * taken here */
                dlm_lockres_release_ast(dlm, res);
        }
        list_add_tail(&lock->ast_list, &dlm->pending_asts);
        lock->ast_pending = 1;
        spin_unlock(&lock->spinlock);
}
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        BUG_ON(!dlm);
        BUG_ON(!lock);
        spin_lock(&dlm->ast_lock);
        __dlm_queue_ast(dlm, lock);
        spin_unlock(&dlm->ast_lock);
}
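
/*
 * Note: locks placed on dlm->pending_asts here are drained asynchronously by
 * the dlm thread (dlm_flush_asts() in dlmthread.c), which delivers each one
 * via dlm_do_local_ast() when the lock belongs to this node, or
 * dlm_do_remote_ast() when the ast must be proxied back to the requester.
 */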
void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        struct dlm_lock_resource *res;

        BUG_ON(!dlm);
        BUG_ON(!lock);

        assert_spin_locked(&dlm->ast_lock);

        res = lock->lockres;

        BUG_ON(!list_empty(&lock->bast_list));
        if (lock->bast_pending)
                mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

        /* putting lock on list, add a ref */
        dlm_lock_get(lock);
        spin_lock(&lock->spinlock);
        list_add_tail(&lock->bast_list, &dlm->pending_basts);
        lock->bast_pending = 1;
        spin_unlock(&lock->spinlock);
}
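
/*
 * dlm->pending_basts is drained the same way: dlm_flush_asts() notifies the
 * node that holds the blocking lock, either locally through
 * dlm_do_local_bast() or over the wire through dlm_send_proxy_bast().
 */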
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                           struct dlm_lock *lock)
{
        struct dlm_lockstatus *lksb = lock->lksb;

        BUG_ON(!lksb);

        /* only updates if this node masters the lockres */
        spin_lock(&res->spinlock);
        if (res->owner == dlm->node_num) {
                /* check the lksb flags for the direction */
                if (lksb->flags & DLM_LKSB_GET_LVB) {
                        mlog(0, "getting lvb from lockres for %s node\n",
                             lock->ml.node == dlm->node_num ? "master" :
                             "remote");
                        memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
                }
                /* Do nothing for lvb put requests - they should be done in
                 * place when the lock is downconverted - otherwise we risk
                 * racing gets and puts which could result in old lvb data
                 * being propagated.  We leave the put flag set and clear it
                 * here.  In the future we might want to clear it at the time
                 * the put is actually done.
                 */
        }
        spin_unlock(&res->spinlock);

        /* reset any lvb flags on the lksb */
        lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                      struct dlm_lock *lock)
{
        dlm_astlockfunc_t *fn;

        mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
             res->lockname.len, res->lockname.name,
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

        fn = lock->ast;
        BUG_ON(lock->ml.node != dlm->node_num);

        dlm_update_lvb(dlm, res, lock);
        (*fn)(lock->astdata);
}
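
/*
 * Illustrative caller-side sketch (hypothetical names): the ast callback and
 * astdata invoked above are the ones a caller registered via dlmlock().  A
 * typical dlm_astlockfunc_t just records the grant and wakes a waiter:
 *
 *	static void example_ast(void *astdata)
 *	{
 *		struct example_lock *el = astdata;
 *
 *		complete(&el->granted);
 *	}
 */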
int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                      struct dlm_lock *lock)
{
        int ret;
        struct dlm_lockstatus *lksb;
        int lksbflags;

        mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
             res->lockname.len, res->lockname.name,
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));

        lksb = lock->lksb;
        BUG_ON(lock->ml.node == dlm->node_num);

        lksbflags = lksb->flags;
        dlm_update_lvb(dlm, res, lock);

        /* lock request came from another node
         * go do the ast over there */
        ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
        return ret;
}
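
/*
 * dlm_send_proxy_ast() used above is the AST-specific wrapper around
 * dlm_send_proxy_ast_msg() (later in this file): it sends msg_type DLM_AST
 * with a zero blocked_type, passing along the lksb flags so the lvb can ride
 * with the message when DLM_LKSB_GET_LVB is set.
 */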
void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                       struct dlm_lock *lock, int blocked_type)
{
        dlm_bastlockfunc_t *fn = lock->bast;

        BUG_ON(lock->ml.node != dlm->node_num);

        mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
             dlm->name, res->lockname.len, res->lockname.name,
             dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
             blocked_type);

        (*fn)(lock->astdata, blocked_type);
}
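
/*
 * Illustrative sketch (hypothetical names): a dlm_bastlockfunc_t is told
 * which mode is being blocked so the holder can decide how far to
 * downconvert, e.g.:
 *
 *	static void example_bast(void *astdata, int blocked_type)
 *	{
 *		struct example_lock *el = astdata;
 *
 *		if (blocked_type == LKM_EXMODE)
 *			queue_work(el->wq, &el->downconvert_work);
 *	}
 */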
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
                          void **ret_data)
{
        int ret;
        unsigned int locklen;
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_resource *res = NULL;
        struct dlm_lock *lock = NULL;
        struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
        char *name;
        struct list_head *head = NULL;
        __be64 cookie;
        u32 flags;
        u8 node;
        if (!dlm_grab(dlm)) {
                dlm_error(DLM_REJECTED);
                return DLM_REJECTED;
        }

        mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
                        "Domain %s not fully joined!\n", dlm->name);
        name = past->name;
        locklen = past->namelen;
        cookie = past->cookie;
        flags = be32_to_cpu(past->flags);
        node = past->node_idx;
        if (locklen > DLM_LOCKID_NAME_MAX) {
                ret = DLM_IVBUFLEN;
                mlog(ML_ERROR, "Invalid name length (%d) in proxy ast "
                     "handler!\n", locklen);
                goto leave;
        }
        if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
             (LKM_PUT_LVB|LKM_GET_LVB)) {
                mlog(ML_ERROR, "Both PUT and GET lvb specified, (0x%x)\n",
                     flags);
                ret = DLM_BADARGS;
                goto leave;
        }

        mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
             (flags & LKM_GET_LVB ? "get lvb" : "none"));
        mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

        if (past->type != DLM_AST &&
            past->type != DLM_BAST) {
                mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu, "
                     "name=%.*s, node=%u\n", past->type,
                     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
                     locklen, name, node);
                ret = DLM_IVLOCKID;
                goto leave;
        }
        res = dlm_lookup_lockres(dlm, name, locklen);
        if (!res) {
                mlog(0, "Got %sast for unknown lockres! cookie=%u:%llu, "
                     "name=%.*s, node=%u\n", (past->type == DLM_AST ? "" : "b"),
                     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
                     locklen, name, node);
                ret = DLM_IVLOCKID;
                goto leave;
        }
        /* cannot get a proxy ast message if this node owns it */
        BUG_ON(res->owner == dlm->node_num);

        mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
             res->lockname.name);
        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_RECOVERING) {
                mlog(0, "Responding with DLM_RECOVERING!\n");
                ret = DLM_RECOVERING;
                goto unlock_out;
        }
        if (res->state & DLM_LOCK_RES_MIGRATING) {
                mlog(0, "Responding with DLM_MIGRATING!\n");
                ret = DLM_MIGRATING;
                goto unlock_out;
        }
        /* try convert queue for both ast/bast */
        head = &res->converting;
        lock = NULL;
        list_for_each_entry(lock, head, list) {
                if (lock->ml.cookie == cookie)
                        goto do_ast;
        }
        /* if not on convert, try blocked for ast, granted for bast */
        if (past->type == DLM_AST)
                head = &res->blocked;
        else
                head = &res->granted;

        list_for_each_entry(lock, head, list) {
                /* if lock is found but unlock is pending ignore the bast */
                if (lock->ml.cookie == cookie) {
                        if (lock->unlock_pending)
                                break;
                        goto do_ast;
                }
        }

        mlog(0, "Got %sast for unknown lock! cookie=%u:%llu, name=%.*s, "
             "node=%u\n", past->type == DLM_AST ? "" : "b",
             dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
             dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
             locklen, name, node);

        ret = DLM_NORMAL;
unlock_out:
        spin_unlock(&res->spinlock);
        goto leave;

do_ast:
        ret = DLM_NORMAL;
        if (past->type == DLM_AST) {
                /* do not alter lock refcount.  switching lists. */
                list_move_tail(&lock->list, &res->granted);
                mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
                     dlm->name, res->lockname.len, res->lockname.name,
                     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
                     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
                     lock->ml.type, lock->ml.convert_type);

                if (lock->ml.convert_type != LKM_IVMODE) {
                        lock->ml.type = lock->ml.convert_type;
                        lock->ml.convert_type = LKM_IVMODE;
                } else {
                        // should already be there....
                }

                lock->lksb->status = DLM_NORMAL;

                /* if we requested the lvb, fetch it into our lksb now */
                if (flags & LKM_GET_LVB) {
                        BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
                        memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
                }
        }
        spin_unlock(&res->spinlock);
        if (past->type == DLM_AST)
                dlm_do_local_ast(dlm, res, lock);
        else
                dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:
        if (res)
                dlm_lockres_put(res);

        dlm_put(dlm);
        return ret;
}
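
/*
 * This handler runs on a node that does *not* master the lockres: the master
 * proxies the AST/BAST here once the lock is granted or found blocking.  It
 * is registered for DLM_PROXY_AST_MSG with o2net during domain setup (see
 * dlm_register_domain_handlers() in dlmdomain.c).
 */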
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                           struct dlm_lock *lock, int msg_type,
                           int blocked_type, int flags)
{
        int ret = 0;
        struct dlm_proxy_ast past;
        struct kvec vec[2];
        size_t veclen = 1;
        int status;

        mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
             res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
             blocked_type);

        memset(&past, 0, sizeof(struct dlm_proxy_ast));
        past.node_idx = dlm->node_num;
        past.type = msg_type;
        past.blocked_type = blocked_type;
        past.namelen = res->lockname.len;
        memcpy(past.name, res->lockname.name, past.namelen);
        past.cookie = lock->ml.cookie;

        vec[0].iov_len = sizeof(struct dlm_proxy_ast);
        vec[0].iov_base = &past;
        if (flags & DLM_LKSB_GET_LVB) {
                be32_add_cpu(&past.flags, LKM_GET_LVB);
                vec[1].iov_len = DLM_LVB_LEN;
                vec[1].iov_base = lock->lksb->lvb;
                veclen++;
        }
        ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
                                     lock->ml.node, &status);
        if (ret < 0)
                mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
                     dlm->name, res->lockname.len, res->lockname.name, ret,
                     lock->ml.node);
        else {
                if (status == DLM_RECOVERING) {
                        mlog(ML_ERROR, "sent AST to node %u, it thinks this "
                             "node is dead!\n", lock->ml.node);
                        BUG();
                } else if (status == DLM_MIGRATING) {
                        mlog(ML_ERROR, "sent AST to node %u, it returned "
                             "DLM_MIGRATING!\n", lock->ml.node);
                        BUG();
                } else if (status != DLM_NORMAL && status != DLM_IVLOCKID) {
                        mlog(ML_ERROR, "AST to node %u returned %d!\n",
                             lock->ml.node, status);