// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

     dlm_lock          = request_lock
     dlm_lock+CONVERT  = convert_lock
     dlm_unlock        = unlock_lock
     dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/
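
/* Illustrative sketch, not part of the original file: the stages for a
   new request whose resource master is remote.  The call chain below is
   simplified; receive_request_reply() and the message dispatch are
   defined later in this file.

     dlm_lock()                  stage 1: validate args (no CONVERT flag)
       request_lock()            stage 2: find_rsb() + lock_rsb()
         _request_lock()         stage 3: master is remote -> send_request()
           send_request()        message to the master node, which runs
                                 receive_request()/do_request() (stage 4)
                                 and sends a reply
     receive_request_reply()     runs back on this node when the reply
                                 arrives
*/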
#include <trace/events/dlm.h>

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "requestqueue.h"
#include "lockspace.h"
#include "lvb_table.h"
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);
/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1]  (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{1, 1, 1, 1, 1, 1, 1, 0},	/* UN */
	{1, 1, 1, 1, 1, 1, 1, 0},	/* NL */
	{1, 1, 1, 1, 1, 1, 0, 0},	/* CR */
	{1, 1, 1, 1, 0, 0, 0, 0},	/* CW */
	{1, 1, 1, 0, 1, 0, 0, 0},	/* PR */
	{1, 1, 1, 0, 0, 0, 0, 0},	/* PW */
	{1, 1, 0, 0, 0, 0, 0, 0},	/* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
};
/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 * 1 = LVB is returned to the caller
 * 0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
	/* UN   NL  CR  CW  PR  PW  EX  PD*/
	{  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
	{  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
	{  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
	{  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
	{  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
	{  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
	{  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};
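
/* Illustrative reading of dlm_lvb_operations, not part of the original
   file.  Indexing is matrix[grmode+1][rqmode+1], with DLM_LOCK_IV (-1)
   in the "UN" position and DLM_LOCK_NL (0) .. DLM_LOCK_EX (5) following:

     NL -> EX convert: dlm_lvb_operations[0 + 1][5 + 1] == 1,
       the resource's LVB is copied back to the caller;
     EX -> NL convert: dlm_lvb_operations[5 + 1][0 + 1] == 0,
       the caller's LVB is written to the resource;
     unlock from NL ("UN" column): the entry is -1, the LVB is untouched.
*/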
#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
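
/* Example use, not part of the original file: a granted PR lock is
   compatible with a requested CR lock but not with a requested EX lock:

     dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_CR);    returns 1
     dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX);    returns 0
*/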
/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* UN */
	{0, 0, 1, 1, 1, 1, 1, 0},	/* NL */
	{0, 0, 0, 1, 1, 1, 1, 0},	/* CR */
	{0, 0, 0, 0, 1, 1, 1, 0},	/* CW */
	{0, 0, 0, 1, 0, 1, 1, 0},	/* PR */
	{0, 0, 0, 0, 0, 0, 1, 0},	/* PW */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
};
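
/* Illustrative reading, not part of the original file: a 1 entry marks
   a grmode -> rqmode pair for which the QUECVT flag is accepted, i.e.
   conversions to a strictly "higher" mode (CW and PR are incomparable,
   so both directions between them are allowed):

     NL -> EX: __quecvt_compat_matrix[0 + 1][5 + 1] == 1, QUECVT valid;
     EX -> NL: __quecvt_compat_matrix[5 + 1][0 + 1] == 0, rejected by
       the argument checks in stage 1.
*/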
void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
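
/* Illustrative note, not part of the original file: PR <-> CW is the
   "middle" case because the two modes are mutually incompatible
   (__dlm_compat_matrix: CW row / PR column is 0) even though CW ranks
   below PR numerically.  So EX -> PR (rqmode 3 < grmode 5) is a
   down-conversion and can be granted in place, while PR -> CW is not a
   down-conversion despite DLM_LOCK_CW < DLM_LOCK_PR. */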
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

#ifdef CONFIG_DLM_DEPRECATED_API
	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}
#endif

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;
	int rv;

	rv = kref_put_lock(&r->res_ref, toss_rsb,
			   &ls->ls_rsbtbl[bucket].lock);
	if (rv)
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count,
			  (const char *)name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}

static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}
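
/* Illustrative note, not part of the original file: zero-padding the
   search key turns the comparison into a fixed-width compare, so a
   short name never matches a longer name sharing its prefix.  An rsb
   named "foobar" compared against name "foo", nlen 3: maxname becomes
   "foo\0\0...", and memcmp() over all DLM_RESNAME_MAXLEN bytes is
   non-zero, i.e. no match.  (res_name compares cleanly past res_length
   because rsb's are allocated zeroed.) */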
int dlm_search_rsb_tree(struct rb_root *tree, const void *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}

static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name-to-master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */
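
/* Illustrative lifecycle, not part of the original file:

     first local request    -> find_rsb() creates the rsb on the keep
                               list with ref count 1
     more local locks       -> find_rsb() finds it on the keep list and
                               increments the ref count
     last local ref dropped -> put_rsb() -> toss_rsb(): moved to the
                               toss list, unrefcounted
     locked again soon      -> find_rsb() moves it back to the keep
                               list with ref count 1
     never used again       -> shrink_bucket() frees it once it has
                               been tossed for dlm_config.ci_toss_secs
*/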
static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master)  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive.  No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

static int find_rsb(struct dlm_ls *ls, const void *name, int len,
		    int from_nodeid, unsigned int flags,
		    struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}

/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}

static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid,
				int from_nodeid, bool toss_list, unsigned int flags,
				int *r_nodeid, int *result)
{
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int from_master = (flags & DLM_LU_RECOVER_DIR);

	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "%s res_dir %d our %d %s", __func__,
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		 * the previous master failed.  Setting NEW_MASTER will
		 * force dlm_recover_masters to call recover_master on this
		 * rsb even though the res_nodeid is no longer removed.
		 */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on the toss list. */
			log_error(ls, "%s fix_master on toss", __func__);
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		 * a previous recovery cycle, and we aborted the previous
		 * cycle before recovering this master value
		 */

		log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s",
			  __func__, from_nodeid, r->res_master_nodeid,
			  r->res_nodeid, r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			goto ret_assign;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		 * up the master for this rsb
		 */

		log_debug(ls, "%s master 0 to %d first %x %s", __func__,
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		 * finds the rsb on the keep list and ignores the remove,
		 * and the former master sends a lookup
		 */

		log_limit(ls, "%s from master %d flags %x first %x %s",
			  __func__, from_nodeid, flags, r->res_first_lkid,
			  r->res_name);
	}

 ret_assign:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;
}

/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */
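
/* Illustrative sketch, not part of the original file: the normal
   (non-recovery) call made on the dir node when a lookup arrives,
   i.e. the "flags 0" case above.  "ret_nodeid" is a hypothetical
   local variable:

     int ret_nodeid, error;

     error = dlm_master_lookup(ls, from_nodeid, name, len, 0,
                               &ret_nodeid, NULL);
     // on success, ret_nodeid is the master nodeid to send back to
     // from_nodeid in the lookup reply

   Passing NULL for result is allowed; the assignments are guarded
   with "if (result)" in the functions here.
*/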
int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		 * checking/changing res_master_nodeid
		 */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);

		__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false,
				    flags, r_nodeid, result);

		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);

		return 0;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	 * and lock_rsb is not used, but is protected by the rsbtbl lock
	 */

	__dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags,
			    r_nodeid, result);

	r->res_toss_time = jiffies;
	/* the rsb was inactive (on toss list) */
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;

	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret,
		       int start, int end)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_last_bast_mode = -1;
	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
#ifdef CONFIG_DLM_DEPRECATED_API
	INIT_LIST_HEAD(&lkb->lkb_time_list);
#endif
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	INIT_LIST_HEAD(&lkb->lkb_callbacks);
	spin_lock_init(&lkb->lkb_cb_lock);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, start, end, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		dlm_free_lkb(lkb);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	return _create_lkb(ls, lkb_ret, 1, 0);
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;
	int rv;

	rv = kref_put_lock(&lkb->lkb_ref, kill_lkb,
			   &ls->ls_lkbidr_spin);
	if (rv) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
	}

	return rv;
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

static void unhold_lkb_assert(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	DLM_ASSERT(false, dlm_print_lkb(lkb););
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	kref_put(&lkb->lkb_ref, unhold_lkb_assert);
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL, *iter;

	list_for_each_entry(iter, head, lkb_statequeue)
		if (iter->lkb_rqmode < mode) {
			lkb = iter;
			list_add_tail(new, &iter->lkb_statequeue);
			break;
		}

	if (!lkb)
		list_add_tail(new, head);
}
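
/* Illustrative note, not part of the original file: lkb_add_ordered()
   inserts "new" just before the first entry whose lkb_rqmode is lower
   than "mode", keeping the list in descending mode order; if no entry
   has a lower mode, "new" is appended at the tail.  E.g. inserting
   mode PW (4) into a queue with entries of mode EX (5) and PR (3):
   the PR entry is the first with a lower mode, giving EX, PW, PR. */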
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		unhold_lkb(lkb);
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0,
		  lkb->lkb_remid, mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't received a
	   reply to the op that was in progress prior to the unlock/cancel;
	   we give up on any reply to the earlier op.  FIXME: not sure
	   when/how this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		unhold_lkb(lkb);
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
	if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

static void shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	char *name;
	int our_nodeid = dlm_our_nodeid();
	int remote_count = 0;
	int need_shrink = 0;
	int i, len, rv;

	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
		next = rb_next(n);
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		/* If we're the directory record for this rsb, and
		   we're not the master of it, then we need to wait
		   for the master node to send us a dir remove for it
		   before removing the dir record. */

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid != our_nodeid) &&
		    (dlm_dir_nodeid(r) == our_nodeid)) {
			continue;
		}

		need_shrink = 1;

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			continue;
		}

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid == our_nodeid) &&
		    (dlm_dir_nodeid(r) != our_nodeid)) {

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			ls->ls_remove_lens[remote_count] = r->res_length;
			memcpy(ls->ls_remove_names[remote_count], r->res_name,
			       DLM_RESNAME_MAXLEN);
			remote_count++;

			if (remote_count >= DLM_REMOVE_NAMES_MAX)
				break;
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			log_error(ls, "tossed rsb in use %s", r->res_name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		dlm_free_rsb(r);
	}

	if (need_shrink)
		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
	else
		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	/*
	 * While searching for rsb's to free, we found some that require
	 * remote removal.  We leave them in place and find them again here
	 * so there is a very small gap between removing them from the toss
	 * list and sending the removal.  Keeping this gap small is
	 * important to keep us (the master node) from being out of sync
	 * with the remote dir node for very long.
	 */

	for (i = 0; i < remote_count; i++) {
		name = ls->ls_remove_names[i];
		len = ls->ls_remove_lens[i];

		spin_lock(&ls->ls_rsbtbl[b].lock);
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
		if (rv) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name not toss %s", name);
			continue;
		}

		if (r->res_master_nodeid != our_nodeid) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name master %d dir %d our %d %s",
				  r->res_master_nodeid, r->res_dir_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (r->res_dir_nodeid == our_nodeid) {
			/* should never happen */
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name dir %d master %d our %d %s",
				  r->res_dir_nodeid, r->res_master_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name toss_time %lu now %lu %s",
				  r->res_toss_time, jiffies, name);
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name in use %s", name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		send_remove(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);

		dlm_free_rsb(r);
	}
}

void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}

#ifdef CONFIG_DLM_DEPRECATED_API
static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb))
		return;

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}

/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb = NULL, *iter;
	int do_cancel, do_warn;
	s64 wait_us;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(iter, &ls->ls_timeout, lkb_time_list) {

			wait_us = ktime_to_us(ktime_sub(ktime_get(),
							iter->lkb_timestamp));

			if ((iter->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    wait_us >= (iter->lkb_timeout_cs * 10000))
				do_cancel = 1;

			if ((iter->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(iter);
			lkb = iter;
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!lkb)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
		lkb = NULL;
	}
}

/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
	mutex_unlock(&ls->ls_timeout_mutex);
}
#else
static void add_timeout(struct dlm_lkb *lkb) { }
static void del_timeout(struct dlm_lkb *lkb) { }
#endif

/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b =  dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}

static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}

/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		if (len > r->res_ls->ls_lvblen)
			len = r->res_ls->ls_lvblen;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
	}
}

/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */
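
/* Illustrative pairing, not part of the original file, for a lock
   mastered remotely: each operation runs its plain form on the master
   node and its _pc() form on the process node when the reply arrives:

     unlock:  do_unlock() -> remove_lock()        (master)
              receive_unlock_reply() -> remove_lock_pc()
     cancel:  do_cancel() -> revert_lock()
              receive_cancel_reply() -> revert_lock_pc()
     grant:   do_request()/do_convert() -> grant_lock()
              reply handling -> grant_lock_pc()
*/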
1978 static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1981 lkb->lkb_grmode = DLM_LOCK_IV;
1982 /* this unhold undoes the original ref from create_lkb()
1983 so this leads to the lkb being freed */
1987 static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1989 set_lvb_unlock(r, lkb);
1990 _remove_lock(r, lkb);
1993 static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
1995 _remove_lock(r, lkb);
1998 /* returns: 0 did nothing
1999 1 moved lock to granted
2002 static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2006 lkb->lkb_rqmode = DLM_LOCK_IV;
2008 switch (lkb->lkb_status) {
2009 case DLM_LKSTS_GRANTED:
2011 case DLM_LKSTS_CONVERT:
2012 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2015 case DLM_LKSTS_WAITING:
2017 lkb->lkb_grmode = DLM_LOCK_IV;
2018 /* this unhold undoes the original ref from create_lkb()
2019 so this leads to the lkb being freed */
2024 log_print("invalid status for revert %d", lkb->lkb_status);
2029 static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
2031 return revert_lock(r, lkb);
2034 static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2036 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
2037 lkb->lkb_grmode = lkb->lkb_rqmode;
2038 if (lkb->lkb_status)
2039 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
2041 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
2044 lkb->lkb_rqmode = DLM_LOCK_IV;
2045 lkb->lkb_highbast = 0;
2048 static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2050 set_lvb_lock(r, lkb);
2051 _grant_lock(r, lkb);
2054 static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
2055 struct dlm_message *ms)
2057 set_lvb_lock_pc(r, lkb, ms);
2058 _grant_lock(r, lkb);
2061 /* called by grant_pending_locks() which means an async grant message must
2062 be sent to the requesting node in addition to granting the lock if the
2063 lkb belongs to a remote node. */
2065 static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
2068 if (is_master_copy(lkb))
2071 queue_cast(r, lkb, 0);
2074 /* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
2075 change the granted/requested modes. We're munging things accordingly in
2077 CONVDEADLK: our grmode may have been forced down to NL to resolve a
2079 ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
2080 compatible with other granted locks */
2082 static void munge_demoted(struct dlm_lkb *lkb)
2084 if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
2085 log_print("munge_demoted %x invalid modes gr %d rq %d",
2086 lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
2090 lkb->lkb_grmode = DLM_LOCK_NL;
2093 static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
2095 if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) &&
2096 ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) {
2097 log_print("munge_altmode %x invalid reply type %d",
2098 lkb->lkb_id, le32_to_cpu(ms->m_type));
2102 if (lkb->lkb_exflags & DLM_LKF_ALTPR)
2103 lkb->lkb_rqmode = DLM_LOCK_PR;
2104 else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
2105 lkb->lkb_rqmode = DLM_LOCK_CW;
2107 log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
2112 static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
2114 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
2116 if (lkb->lkb_id == first->lkb_id)
2122 /* Check if the given lkb conflicts with another lkb on the queue. */
2124 static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
2126 struct dlm_lkb *this;
2128 list_for_each_entry(this, head, lkb_statequeue) {
2131 if (!modes_compat(this, lkb))
2138 * "A conversion deadlock arises with a pair of lock requests in the converting
2139 * queue for one resource. The granted mode of each lock blocks the requested
2140 * mode of the other lock."
2142 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2143 * convert queue from being granted, then deadlk/demote lkb.
2146 * Granted Queue: empty
2147 * Convert Queue: NL->EX (first lock)
2148 * PR->EX (second lock)
2150 * The first lock can't be granted because of the granted mode of the second
2151 * lock and the second lock can't be granted because it's not first in the
2152 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2153 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2154 * flag set and return DEMOTED in the lksb flags.
2156 * Originally, this function detected conv-deadlk in a more limited scope:
2157 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2158 * - if lkb1 was the first entry in the queue (not just earlier), and was
2159 * blocked by the granted mode of lkb2, and there was nothing on the
2160 * granted queue preventing lkb1 from being granted immediately, i.e.
2161 * lkb2 was the only thing preventing lkb1 from being granted.
2163 * That second condition meant we'd only say there was conv-deadlk if
2164 * resolving it (by demotion) would lead to the first lock on the convert
2165 * queue being granted right away. It allowed conversion deadlocks to exist
2166 * between locks on the convert queue while they couldn't be granted anyway.
2168 * Now, we detect and take action on conversion deadlocks immediately when
2169 * they're created, even if they may not be immediately consequential. If
2170 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2171 * mode that would prevent lkb1's conversion from being granted, we do a
2172 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2173 * I think this means that the lkb_is_ahead condition below should always
2174 * be zero, i.e. there will never be conv-deadlk between two locks that are
2175 * both already on the convert queue.
2178 static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
2180 struct dlm_lkb *lkb1;
2181 int lkb_is_ahead = 0;
2183 list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
2189 if (!lkb_is_ahead) {
2190 if (!modes_compat(lkb2, lkb1))
2193 if (!modes_compat(lkb2, lkb1) &&
2194 !modes_compat(lkb1, lkb2))
2202 * Return 1 if the lock can be granted, 0 otherwise.
2203 * Also detect and resolve conversion deadlocks.
2205 * lkb is the lock to be granted
2207 * now is 1 if the function is being called in the context of the
2208 * immediate request, it is 0 if called later, after the lock has been
2211 * recover is 1 if dlm_recover_grant() is trying to grant conversions
2214 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
2217 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2220 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2223 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2224 * a new request for a NL mode lock being blocked.
2226 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2227 * request, then it would be granted. In essence, the use of this flag
2228 * tells the Lock Manager to expedite theis request by not considering
2229 * what may be in the CONVERTING or WAITING queues... As of this
2230 * writing, the EXPEDITE flag can be used only with new requests for NL
2231 * mode locks. This flag is not valid for conversion requests.
2233 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2234 * conversion or used with a non-NL requested mode. We also know an
2235 * EXPEDITE request is always granted immediately, so now must always
2236 * be 1. The full condition to grant an expedite request: (now &&
2237 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2238 * therefore be shortened to just checking the flag.
2241 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2245 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2246 * added to the remaining conditions.
2249 if (queue_conflict(&r->res_grantqueue, lkb))
2253 * 6-3: By default, a conversion request is immediately granted if the
2254 * requested mode is compatible with the modes of all other granted
2258 if (queue_conflict(&r->res_convertqueue, lkb))
2262 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2263 * locks for a recovered rsb, on which lkb's have been rebuilt.
2264 * The lkb's may have been rebuilt on the queues in a different
2265 * order than they were in on the previous master. So, granting
2266 * queued conversions in order after recovery doesn't make sense
2267 * since the order hasn't been preserved anyway. The new order
2268 * could also have created a new "in place" conversion deadlock.
2269 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2270 * After recovery, there would be no granted locks, and possibly
2271 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2272 * recovery, grant conversions without considering order.
2275 if (conv && recover)
2279 * 6-5: But the default algorithm for deciding whether to grant or
2280 * queue conversion requests does not by itself guarantee that such
2281 * requests are serviced on a "first come first serve" basis. This, in
2282 * turn, can lead to a phenomenon known as "indefinite postponement".
2284 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2285 * the system service employed to request a lock conversion. This flag
2286 * forces certain conversion requests to be queued, even if they are
2287 * compatible with the granted modes of other locks on the same
2288 * resource. Thus, the use of this flag results in conversion requests
2289 * being ordered on a "first come first serve" basis.
2291 * DCT: This condition is all about new conversions being able to occur
2292 * "in place" while the lock remains on the granted queue (assuming
2293 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2294 * doesn't _have_ to go onto the convert queue where it's processed in
2295 * order. The "now" variable is necessary to distinguish converts
2296 * being received and processed for the first time now, because once a
2297 * convert is moved to the conversion queue the condition below applies
2298 * requiring fifo granting.
2301 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2305 * Even if the convert is compat with all granted locks,
2306 * QUECVT forces it behind other locks on the convert queue.
2309 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2310 if (list_empty(&r->res_convertqueue))
2317 * The NOORDER flag is set to avoid the standard vms rules on grant order.
2321 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2325 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2326 * granted until all other conversion requests ahead of it are granted and/or canceled.
2330 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2334 * 6-4: By default, a new request is immediately granted only if all
2335 * three of the following conditions are satisfied when the request is issued:
2337 * - The queue of ungranted conversion requests for the resource is empty.
2339 * - The queue of ungranted new requests for the resource is empty.
2340 * - The mode of the new request is compatible with the most
2341 * restrictive mode of all granted locks on the resource.
2344 if (now && !conv && list_empty(&r->res_convertqueue) &&
2345 list_empty(&r->res_waitqueue))
2349 * 6-4: Once a lock request is in the queue of ungranted new requests,
2350 * it cannot be granted until the queue of ungranted conversion
2351 * requests is empty, all ungranted new requests ahead of it are
2352 * granted and/or canceled, and it is compatible with the granted mode
2353 * of the most restrictive lock granted on the resource.
2356 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2357 first_in_list(lkb, &r->res_waitqueue))
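/*
 * A worked example of the 6-4 rules above (hypothetical state): the
 * grant queue holds a single PR lock and the convert and wait queues
 * are empty. A new CR request is granted immediately since CR is
 * compatible with PR, while a new EX request fails the compatibility
 * test and is queued on the wait queue (or gets -EAGAIN from
 * do_request() if it cannot be queued, e.g. with DLM_LKF_NOQUEUE).
 */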
2363 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2364 int recover, int *err)
2367 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2368 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2373 rv = _can_be_granted(r, lkb, now, recover);
2378 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2379 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2380 * cancels one of the locks.
2383 if (is_convert && can_be_queued(lkb) &&
2384 conversion_deadlock_detect(r, lkb)) {
2385 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2386 lkb->lkb_grmode = DLM_LOCK_NL;
2387 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2391 log_print("can_be_granted deadlock %x now %d",
2399 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2400 * to grant a request in a mode other than the normal rqmode. It's a
2401 * simple way to provide a big optimization to applications that can
2405 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2407 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2411 lkb->lkb_rqmode = alt;
2412 rv = _can_be_granted(r, lkb, now, 0);
2414 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2416 lkb->lkb_rqmode = rqmode;
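/*
 * Alt-mode sketch (hypothetical scenario): a new CW request carrying
 * DLM_LKF_ALTPR is blocked by an existing PR lock, since CW and PR are
 * incompatible. The retry above uses rqmode PR, which is compatible
 * with the granted PR lock, so the request is granted and
 * DLM_SBF_ALTMODE tells the caller it received PR rather than the CW
 * it asked for.
 */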
2422 /* Returns the highest requested mode of all blocked conversions; sets
2423 cw if there's a blocked conversion to DLM_LOCK_CW. */
2425 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2426 unsigned int *count)
2428 struct dlm_lkb *lkb, *s;
2429 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2430 int hi, demoted, quit, grant_restart, demote_restart;
2439 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2440 demoted = is_demoted(lkb);
2443 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2444 grant_lock_pending(r, lkb);
2451 if (!demoted && is_demoted(lkb)) {
2452 log_print("WARN: pending demoted %x node %d %s",
2453 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2460 * If the DLM_LKF_NODLCKWT flag is set and a conversion
2461 * deadlock is detected, we request a blocking AST so that
2462 * the conversion can be demoted (or canceled).
2464 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2465 if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2466 queue_bast(r, lkb, lkb->lkb_rqmode);
2467 lkb->lkb_highbast = lkb->lkb_rqmode;
2470 log_print("WARN: pending deadlock %x node %d %s",
2471 lkb->lkb_id, lkb->lkb_nodeid,
2478 hi = max_t(int, lkb->lkb_rqmode, hi);
2480 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2486 if (demote_restart && !quit) {
2491 return max_t(int, high, hi);
2494 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2495 unsigned int *count)
2497 struct dlm_lkb *lkb, *s;
2499 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2500 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2501 grant_lock_pending(r, lkb);
2505 high = max_t(int, lkb->lkb_rqmode, high);
2506 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2514 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2515 on either the convert or waiting queue.
2516 high is the largest rqmode of all locks blocked on the convert or
   waiting queue. */
2519 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2521 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2522 if (gr->lkb_highbast < DLM_LOCK_EX)
2527 if (gr->lkb_highbast < high &&
2528 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
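/*
 * Example (hypothetical state): a lock granted in PR while an EX
 * request is blocked (high == DLM_LOCK_EX). PR and EX are incompatible
 * in __dlm_compat_matrix, so the PR holder gets a bast for EX,
 * provided one hasn't already been sent (lkb_highbast < EX). The PR/CW
 * special case above exists because PR and CW are mutually
 * incompatible even though CW is not a "higher" mode than PR.
 */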
2533 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2535 struct dlm_lkb *lkb, *s;
2536 int high = DLM_LOCK_IV;
2539 if (!is_master(r)) {
2540 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2545 high = grant_pending_convert(r, high, &cw, count);
2546 high = grant_pending_wait(r, high, &cw, count);
2548 if (high == DLM_LOCK_IV)
2552 * If there are locks left on the wait/convert queue then send blocking
2553 * ASTs to granted locks based on the largest requested mode (high)
2557 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2558 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2559 if (cw && high == DLM_LOCK_PR &&
2560 lkb->lkb_grmode == DLM_LOCK_PR)
2561 queue_bast(r, lkb, DLM_LOCK_CW);
2563 queue_bast(r, lkb, high);
2564 lkb->lkb_highbast = high;
2569 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2571 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2572 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2573 if (gr->lkb_highbast < DLM_LOCK_EX)
2578 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2583 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2584 struct dlm_lkb *lkb)
2588 list_for_each_entry(gr, head, lkb_statequeue) {
2589 /* skip self when sending basts to convertqueue */
2592 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2593 queue_bast(r, gr, lkb->lkb_rqmode);
2594 gr->lkb_highbast = lkb->lkb_rqmode;
2599 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2601 send_bast_queue(r, &r->res_grantqueue, lkb);
2604 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2606 send_bast_queue(r, &r->res_grantqueue, lkb);
2607 send_bast_queue(r, &r->res_convertqueue, lkb);
2610 /* set_master(r, lkb) -- set the master nodeid of a resource
2612 The purpose of this function is to set the nodeid field in the given
2613 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2614 known, it can just be copied to the lkb and the function will return
2615 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2616 before it can be copied to the lkb.
2618 When the rsb nodeid is being looked up remotely, the initial lkb
2619 causing the lookup is kept on the ls_waiters list waiting for the
2620 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2621 on the rsb's res_lookup list until the master is verified.
2624 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2625 1: the rsb master is not available and the lkb has been placed on
      a wait queue */
2629 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2631 int our_nodeid = dlm_our_nodeid();
2633 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2634 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2635 r->res_first_lkid = lkb->lkb_id;
2636 lkb->lkb_nodeid = r->res_nodeid;
2640 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2641 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2645 if (r->res_master_nodeid == our_nodeid) {
2646 lkb->lkb_nodeid = 0;
2650 if (r->res_master_nodeid) {
2651 lkb->lkb_nodeid = r->res_master_nodeid;
2655 if (dlm_dir_nodeid(r) == our_nodeid) {
2656 /* This is a somewhat unusual case; find_rsb will usually
2657 have set res_master_nodeid when dir nodeid is local, but
2658 there are cases where we become the dir node after we've
2659 passed find_rsb and gone through _request_lock again.
2660 confirm_master() or process_lookup_list() needs to be
2661 called after this. */
2662 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2663 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2665 r->res_master_nodeid = our_nodeid;
2667 lkb->lkb_nodeid = 0;
2671 r->res_first_lkid = lkb->lkb_id;
2672 send_lookup(r, lkb);
2676 static void process_lookup_list(struct dlm_rsb *r)
2678 struct dlm_lkb *lkb, *safe;
2680 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2681 list_del_init(&lkb->lkb_rsb_lookup);
2682 _request_lock(r, lkb);
2687 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2689 static void confirm_master(struct dlm_rsb *r, int error)
2691 struct dlm_lkb *lkb;
2693 if (!r->res_first_lkid)
2699 r->res_first_lkid = 0;
2700 process_lookup_list(r);
2706 /* the remote request failed and won't be retried (it was
2707 a NOQUEUE, or has been canceled/unlocked); make a waiting
2708 lkb the first_lkid */
2710 r->res_first_lkid = 0;
2712 if (!list_empty(&r->res_lookup)) {
2713 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2715 list_del_init(&lkb->lkb_rsb_lookup);
2716 r->res_first_lkid = lkb->lkb_id;
2717 _request_lock(r, lkb);
2722 log_error(r->res_ls, "confirm_master unknown error %d", error);
2726 #ifdef CONFIG_DLM_DEPRECATED_API
2727 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2728 int namelen, unsigned long timeout_cs,
2729 void (*ast) (void *astparam),
2731 void (*bast) (void *astparam, int mode),
2732 struct dlm_args *args)
2734 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2735 int namelen, void (*ast)(void *astparam),
2737 void (*bast)(void *astparam, int mode),
2738 struct dlm_args *args)
2743 /* check for invalid arg usage */
2745 if (mode < 0 || mode > DLM_LOCK_EX)
2748 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2751 if (flags & DLM_LKF_CANCEL)
2754 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2757 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2760 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2763 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2766 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2769 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2772 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2778 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2781 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2784 /* these args will be copied to the lkb in validate_lock_args,
2785 it cannot be done now because when converting locks, fields in
2786 an active lkb cannot be modified before locking the rsb */
2788 args->flags = flags;
2790 args->astparam = astparam;
2791 args->bastfn = bast;
2792 #ifdef CONFIG_DLM_DEPRECATED_API
2793 args->timeout = timeout_cs;
2802 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2804 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2805 DLM_LKF_FORCEUNLOCK))
2808 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2811 args->flags = flags;
2812 args->astparam = astarg;
2816 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2817 struct dlm_args *args)
2821 if (args->flags & DLM_LKF_CONVERT) {
2822 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2825 /* lock not allowed if there's any op in progress */
2826 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
2829 if (is_overlap(lkb))
2833 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2836 if (args->flags & DLM_LKF_QUECVT &&
2837 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2841 lkb->lkb_exflags = args->flags;
2842 lkb->lkb_sbflags = 0;
2843 lkb->lkb_astfn = args->astfn;
2844 lkb->lkb_astparam = args->astparam;
2845 lkb->lkb_bastfn = args->bastfn;
2846 lkb->lkb_rqmode = args->mode;
2847 lkb->lkb_lksb = args->lksb;
2848 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2849 lkb->lkb_ownpid = (int) current->pid;
2850 #ifdef CONFIG_DLM_DEPRECATED_API
2851 lkb->lkb_timeout_cs = args->timeout;
2859 /* annoy the user because dlm usage is wrong */
2861 log_error(ls, "%s %d %x %x %x %d %d %s", __func__,
2862 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2863 lkb->lkb_status, lkb->lkb_wait_type,
2864 lkb->lkb_resource->res_name);
2867 log_debug(ls, "%s %d %x %x %x %d %d %s", __func__,
2868 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2869 lkb->lkb_status, lkb->lkb_wait_type,
2870 lkb->lkb_resource->res_name);
2877 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
   for success */
2880 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2881 because there may be a lookup in progress and it's valid to do
2882 cancel/unlockf on it */
2884 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2886 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2889 /* normal unlock not allowed if there's any op in progress */
2890 if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) &&
2891 (lkb->lkb_wait_type || lkb->lkb_wait_count))
2894 /* an lkb may be waiting for an rsb lookup to complete where the
2895 lookup was initiated by another lock */
2897 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2898 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2899 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2900 list_del_init(&lkb->lkb_rsb_lookup);
2901 queue_cast(lkb->lkb_resource, lkb,
2902 args->flags & DLM_LKF_CANCEL ?
2903 -DLM_ECANCEL : -DLM_EUNLOCK);
2904 unhold_lkb(lkb); /* undoes create_lkb() */
2906 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2911 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2912 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2917 /* an lkb may still exist even though the lock is EOL'ed due to a
2918 * cancel, unlock or failed noqueue request; an app can't use these
2919 * locks; return same error as if the lkid had not been found at all
2922 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2923 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2928 /* cancel not allowed with another cancel/unlock in progress */
2930 if (args->flags & DLM_LKF_CANCEL) {
2931 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2934 if (is_overlap(lkb))
2937 /* don't let scand try to do a cancel */
2940 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2941 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2946 /* there's nothing to cancel */
2947 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2948 !lkb->lkb_wait_type) {
2953 switch (lkb->lkb_wait_type) {
2954 case DLM_MSG_LOOKUP:
2955 case DLM_MSG_REQUEST:
2956 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2959 case DLM_MSG_UNLOCK:
2960 case DLM_MSG_CANCEL:
2963 /* add_to_waiters() will set OVERLAP_CANCEL */
2967 /* do we need to allow a force-unlock if there's a normal unlock
2968 already in progress? in what conditions could the normal unlock
2969 fail such that we'd want to send a force-unlock to be sure? */
2971 if (args->flags & DLM_LKF_FORCEUNLOCK) {
2972 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
2975 if (is_overlap_unlock(lkb))
2978 /* don't let scand try to do a cancel */
2981 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2982 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2987 switch (lkb->lkb_wait_type) {
2988 case DLM_MSG_LOOKUP:
2989 case DLM_MSG_REQUEST:
2990 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
2993 case DLM_MSG_UNLOCK:
2996 /* add_to_waiters() will set OVERLAP_UNLOCK */
3000 /* an overlapping op shouldn't blow away exflags from other op */
3001 lkb->lkb_exflags |= args->flags;
3002 lkb->lkb_sbflags = 0;
3003 lkb->lkb_astparam = args->astparam;
3010 /* annoy the user because dlm usage is wrong */
3012 log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
3013 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3014 args->flags, lkb->lkb_wait_type,
3015 lkb->lkb_resource->res_name);
3018 log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv,
3019 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3020 args->flags, lkb->lkb_wait_type,
3021 lkb->lkb_resource->res_name);
3029 * Four stage 4 varieties:
3030 * do_request(), do_convert(), do_unlock(), do_cancel()
3031 * These are called on the master node for the given lock and
3032 * from the central locking logic.
3035 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3039 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3041 queue_cast(r, lkb, 0);
3045 if (can_be_queued(lkb)) {
3046 error = -EINPROGRESS;
3047 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3053 queue_cast(r, lkb, -EAGAIN);
3058 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3063 if (force_blocking_asts(lkb))
3064 send_blocking_asts_all(r, lkb);
3067 send_blocking_asts(r, lkb);
3072 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3077 /* changing an existing lock may allow others to be granted */
3079 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3081 queue_cast(r, lkb, 0);
3085 /* can_be_granted() detected that this lock would block in a conversion
3086 deadlock, so we leave it on the granted queue and return EDEADLK in
3087 the ast for the convert. */
3089 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3090 /* it's left on the granted queue */
3091 revert_lock(r, lkb);
3092 queue_cast(r, lkb, -EDEADLK);
3097 /* is_demoted() means the can_be_granted() above set the grmode
3098 to NL, and left us on the granted queue. This auto-demotion
3099 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3100 now grantable. We have to try to grant other converting locks
3101 before we try again to grant this one. */
3103 if (is_demoted(lkb)) {
3104 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3105 if (_can_be_granted(r, lkb, 1, 0)) {
3107 queue_cast(r, lkb, 0);
3110 /* else fall through and move to convert queue */
3113 if (can_be_queued(lkb)) {
3114 error = -EINPROGRESS;
3116 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3122 queue_cast(r, lkb, -EAGAIN);
3127 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3132 grant_pending_locks(r, NULL);
3133 /* grant_pending_locks also sends basts */
3136 if (force_blocking_asts(lkb))
3137 send_blocking_asts_all(r, lkb);
3140 send_blocking_asts(r, lkb);
3145 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3147 remove_lock(r, lkb);
3148 queue_cast(r, lkb, -DLM_EUNLOCK);
3149 return -DLM_EUNLOCK;
3152 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3155 grant_pending_locks(r, NULL);
3158 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3160 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3164 error = revert_lock(r, lkb);
3166 queue_cast(r, lkb, -DLM_ECANCEL);
3167 return -DLM_ECANCEL;
3172 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3176 grant_pending_locks(r, NULL);
3180 * Four stage 3 varieties:
3181 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3184 /* add a new lkb to a possibly new rsb, called by requesting process */
3186 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3190 /* set_master: sets lkb nodeid from r */
3192 error = set_master(r, lkb);
3201 /* receive_request() calls do_request() on remote node */
3202 error = send_request(r, lkb);
3204 error = do_request(r, lkb);
3205 /* for remote locks the request_reply is sent
3206 between do_request and do_request_effects */
3207 do_request_effects(r, lkb, error);
3213 /* change some property of an existing lkb, e.g. mode */
3215 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3220 /* receive_convert() calls do_convert() on remote node */
3221 error = send_convert(r, lkb);
3223 error = do_convert(r, lkb);
3224 /* for remote locks the convert_reply is sent
3225 between do_convert and do_convert_effects */
3226 do_convert_effects(r, lkb, error);
3232 /* remove an existing lkb from the granted queue */
3234 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3239 /* receive_unlock() calls do_unlock() on remote node */
3240 error = send_unlock(r, lkb);
3242 error = do_unlock(r, lkb);
3243 /* for remote locks the unlock_reply is sent
3244 between do_unlock and do_unlock_effects */
3245 do_unlock_effects(r, lkb, error);
3251 /* remove an existing lkb from the convert or wait queue */
3253 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3258 /* receive_cancel() calls do_cancel() on remote node */
3259 error = send_cancel(r, lkb);
3261 error = do_cancel(r, lkb);
3262 /* for remote locks the cancel_reply is sent
3263 between do_cancel and do_cancel_effects */
3264 do_cancel_effects(r, lkb, error);
3271 * Four stage 2 varieties:
3272 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3275 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3276 const void *name, int len,
3277 struct dlm_args *args)
3282 error = validate_lock_args(ls, lkb, args);
3286 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3293 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3295 error = _request_lock(r, lkb);
3302 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3303 struct dlm_args *args)
3308 r = lkb->lkb_resource;
3313 error = validate_lock_args(ls, lkb, args);
3317 error = _convert_lock(r, lkb);
3324 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3325 struct dlm_args *args)
3330 r = lkb->lkb_resource;
3335 error = validate_unlock_args(lkb, args);
3339 error = _unlock_lock(r, lkb);
3346 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3347 struct dlm_args *args)
3352 r = lkb->lkb_resource;
3357 error = validate_unlock_args(lkb, args);
3361 error = _cancel_lock(r, lkb);
3369 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3372 int dlm_lock(dlm_lockspace_t *lockspace,
3374 struct dlm_lksb *lksb,
3377 unsigned int namelen,
3378 uint32_t parent_lkid,
3379 void (*ast) (void *astarg),
3381 void (*bast) (void *astarg, int mode))
3384 struct dlm_lkb *lkb;
3385 struct dlm_args args;
3386 int error, convert = flags & DLM_LKF_CONVERT;
3388 ls = dlm_find_lockspace_local(lockspace);
3392 dlm_lock_recovery(ls);
3395 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3397 error = create_lkb(ls, &lkb);
3402 trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
3404 #ifdef CONFIG_DLM_DEPRECATED_API
3405 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3406 astarg, bast, &args);
3408 error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast,
3415 error = convert_lock(ls, lkb, &args);
3417 error = request_lock(ls, lkb, name, namelen, &args);
3419 if (error == -EINPROGRESS)
3422 trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true);
3424 if (convert || error)
3426 if (error == -EAGAIN || error == -EDEADLK)
3429 dlm_unlock_recovery(ls);
3430 dlm_put_lockspace(ls);
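/*
 * A minimal caller sketch (hypothetical, not part of this file):
 * request a new EX lock on resource "example" and later convert it
 * down to NL. Assumes a lockspace "ls" from dlm_new_lockspace() and a
 * caller-declared struct completion "done"; each operation's status is
 * delivered to my_ast in lksb.sb_status.
 *
 *	static struct dlm_lksb lksb;
 *
 *	static void my_ast(void *arg)
 *	{
 *		complete(arg);
 *	}
 *
 *	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "example", 7, 0,
 *			 my_ast, &done, NULL);
 *	... wait for my_ast, check lksb.sb_status ...
 *	error = dlm_lock(ls, DLM_LOCK_NL, &lksb, DLM_LKF_CONVERT, NULL,
 *			 0, 0, my_ast, &done, NULL);
 */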
3434 int dlm_unlock(dlm_lockspace_t *lockspace,
3437 struct dlm_lksb *lksb,
3441 struct dlm_lkb *lkb;
3442 struct dlm_args args;
3445 ls = dlm_find_lockspace_local(lockspace);
3449 dlm_lock_recovery(ls);
3451 error = find_lkb(ls, lkid, &lkb);
3455 trace_dlm_unlock_start(ls, lkb, flags);
3457 error = set_unlock_args(flags, astarg, &args);
3461 if (flags & DLM_LKF_CANCEL)
3462 error = cancel_lock(ls, lkb, &args);
3464 error = unlock_lock(ls, lkb, &args);
3466 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3468 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3471 trace_dlm_unlock_end(ls, lkb, flags, error);
3475 dlm_unlock_recovery(ls);
3476 dlm_put_lockspace(ls);
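/*
 * Matching unlock sketch (hypothetical, continuing the example above):
 * release the lock; on success my_ast runs with lksb.sb_status set to
 * -DLM_EUNLOCK (-DLM_ECANCEL when cancelling with DLM_LKF_CANCEL).
 *
 *	error = dlm_unlock(ls, lksb.sb_lkid, 0, &lksb, &done);
 */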
3481 * send/receive routines for remote operations and replies
3485 * send_request receive_request
3486 * send_convert receive_convert
3487 * send_unlock receive_unlock
3488 * send_cancel receive_cancel
3489 * send_grant receive_grant
3490 * send_bast receive_bast
3491 * send_lookup receive_lookup
3492 * send_remove receive_remove
3495 * receive_request_reply send_request_reply
3496 * receive_convert_reply send_convert_reply
3497 * receive_unlock_reply send_unlock_reply
3498 * receive_cancel_reply send_cancel_reply
3499 * receive_lookup_reply send_lookup_reply
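*
* Round trip for a remote request, combining the two tables above with
* the stage 3/4 ordering noted in _request_lock():
*
*   L: send_request()          ->  R: receive_request()
*                                  R: do_request()
*                                  R: send_request_reply()
*                                  R: do_request_effects()
*   L: receive_request_reply() <-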
3502 static int _create_message(struct dlm_ls *ls, int mb_len,
3503 int to_nodeid, int mstype,
3504 struct dlm_message **ms_ret,
3505 struct dlm_mhandle **mh_ret,
3508 struct dlm_message *ms;
3509 struct dlm_mhandle *mh;
3512 /* dlm_midcomms_get_mhandle gives us a message handle (mh) that we need to
3513 pass into dlm_midcomms_commit_mhandle and a message buffer (mb) that we
3514 write our data into */
3516 mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, allocation, &mb);
3520 ms = (struct dlm_message *) mb;
3522 ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3523 ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
3524 ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
3525 ms->m_header.h_length = cpu_to_le16(mb_len);
3526 ms->m_header.h_cmd = DLM_MSG;
3528 ms->m_type = cpu_to_le32(mstype);
3535 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3536 int to_nodeid, int mstype,
3537 struct dlm_message **ms_ret,
3538 struct dlm_mhandle **mh_ret,
3541 int mb_len = sizeof(struct dlm_message);
3544 case DLM_MSG_REQUEST:
3545 case DLM_MSG_LOOKUP:
3546 case DLM_MSG_REMOVE:
3547 mb_len += r->res_length;
3549 case DLM_MSG_CONVERT:
3550 case DLM_MSG_UNLOCK:
3551 case DLM_MSG_REQUEST_REPLY:
3552 case DLM_MSG_CONVERT_REPLY:
3554 if (lkb && lkb->lkb_lvbptr)
3555 mb_len += r->res_ls->ls_lvblen;
3559 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3560 ms_ret, mh_ret, allocation);
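/*
 * Worked sizing examples (hypothetical values): a DLM_MSG_REQUEST for
 * an rsb with an 8-byte name gives mb_len = sizeof(struct dlm_message)
 * + 8, while a DLM_MSG_CONVERT_REPLY for an lkb carrying an lvb on a
 * lockspace with ls_lvblen == 32 gives sizeof(struct dlm_message) + 32.
 */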
3563 /* further lowcomms enhancements or alternate implementations may make
3564 the return value from this function useful at some point */
3566 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms,
3567 const void *name, int namelen)
3569 dlm_midcomms_commit_mhandle(mh, name, namelen);
3573 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3574 struct dlm_message *ms)
3576 ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid);
3577 ms->m_pid = cpu_to_le32(lkb->lkb_ownpid);
3578 ms->m_lkid = cpu_to_le32(lkb->lkb_id);
3579 ms->m_remid = cpu_to_le32(lkb->lkb_remid);
3580 ms->m_exflags = cpu_to_le32(lkb->lkb_exflags);
3581 ms->m_sbflags = cpu_to_le32(lkb->lkb_sbflags);
3582 ms->m_flags = cpu_to_le32(lkb->lkb_flags);
3583 ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
3584 ms->m_status = cpu_to_le32(lkb->lkb_status);
3585 ms->m_grmode = cpu_to_le32(lkb->lkb_grmode);
3586 ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode);
3587 ms->m_hash = cpu_to_le32(r->res_hash);
3589 /* m_result and m_bastmode are set from function args,
3590 not from lkb fields */
3592 if (lkb->lkb_bastfn)
3593 ms->m_asts |= cpu_to_le32(DLM_CB_BAST);
3595 ms->m_asts |= cpu_to_le32(DLM_CB_CAST);
3597 /* compare with switch in create_message; send_remove() doesn't
   use send_args() */
3600 switch (ms->m_type) {
3601 case cpu_to_le32(DLM_MSG_REQUEST):
3602 case cpu_to_le32(DLM_MSG_LOOKUP):
3603 memcpy(ms->m_extra, r->res_name, r->res_length);
3605 case cpu_to_le32(DLM_MSG_CONVERT):
3606 case cpu_to_le32(DLM_MSG_UNLOCK):
3607 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
3608 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
3609 case cpu_to_le32(DLM_MSG_GRANT):
3610 if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
3612 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3617 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3619 struct dlm_message *ms;
3620 struct dlm_mhandle *mh;
3621 int to_nodeid, error;
3623 to_nodeid = r->res_nodeid;
3625 error = add_to_waiters(lkb, mstype, to_nodeid);
3629 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
3633 send_args(r, lkb, ms);
3635 error = send_message(mh, ms, r->res_name, r->res_length);
3641 remove_from_waiters(lkb, msg_reply_type(mstype));
3645 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3647 return send_common(r, lkb, DLM_MSG_REQUEST);
3650 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3654 error = send_common(r, lkb, DLM_MSG_CONVERT);
3656 /* down conversions go without a reply from the master */
3657 if (!error && down_conversion(lkb)) {
3658 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3659 r->res_ls->ls_stub_ms.m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
3660 r->res_ls->ls_stub_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
3661 r->res_ls->ls_stub_ms.m_result = 0;
3662 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3668 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3669 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3670 that the master is still correct. */
3672 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3674 return send_common(r, lkb, DLM_MSG_UNLOCK);
3677 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3679 return send_common(r, lkb, DLM_MSG_CANCEL);
3682 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3684 struct dlm_message *ms;
3685 struct dlm_mhandle *mh;
3686 int to_nodeid, error;
3688 to_nodeid = lkb->lkb_nodeid;
3690 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh,
3695 send_args(r, lkb, ms);
3699 error = send_message(mh, ms, r->res_name, r->res_length);
3704 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3706 struct dlm_message *ms;
3707 struct dlm_mhandle *mh;
3708 int to_nodeid, error;
3710 to_nodeid = lkb->lkb_nodeid;
3712 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh,
3717 send_args(r, lkb, ms);
3719 ms->m_bastmode = cpu_to_le32(mode);
3721 error = send_message(mh, ms, r->res_name, r->res_length);
3726 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3728 struct dlm_message *ms;
3729 struct dlm_mhandle *mh;
3730 int to_nodeid, error;
3732 to_nodeid = dlm_dir_nodeid(r);
3734 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3738 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh,
3743 send_args(r, lkb, ms);
3745 error = send_message(mh, ms, r->res_name, r->res_length);
3751 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3755 static int send_remove(struct dlm_rsb *r)
3757 struct dlm_message *ms;
3758 struct dlm_mhandle *mh;
3759 int to_nodeid, error;
3761 to_nodeid = dlm_dir_nodeid(r);
3763 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh,
3768 memcpy(ms->m_extra, r->res_name, r->res_length);
3769 ms->m_hash = cpu_to_le32(r->res_hash);
3771 error = send_message(mh, ms, r->res_name, r->res_length);
3776 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3779 struct dlm_message *ms;
3780 struct dlm_mhandle *mh;
3781 int to_nodeid, error;
3783 to_nodeid = lkb->lkb_nodeid;
3785 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh, GFP_NOFS);
3789 send_args(r, lkb, ms);
3791 ms->m_result = cpu_to_le32(to_dlm_errno(rv));
3793 error = send_message(mh, ms, r->res_name, r->res_length);
3798 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3800 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3803 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3805 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3808 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3810 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3813 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3815 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3818 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3819 int ret_nodeid, int rv)
3821 struct dlm_rsb *r = &ls->ls_stub_rsb;
3822 struct dlm_message *ms;
3823 struct dlm_mhandle *mh;
3824 int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);
3826 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh,
3831 ms->m_lkid = ms_in->m_lkid;
3832 ms->m_result = cpu_to_le32(to_dlm_errno(rv));
3833 ms->m_nodeid = cpu_to_le32(ret_nodeid);
3835 error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in));
3840 /* which args we save from a received message depends heavily on the type
3841 of message, unlike the send side where we can safely send everything about
3842 the lkb for any type of message */
3844 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3846 lkb->lkb_exflags = le32_to_cpu(ms->m_exflags);
3847 lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
3848 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3849 (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
3852 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3854 if (ms->m_flags == cpu_to_le32(DLM_IFL_STUB_MS))
3857 lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
3858 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3859 (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
3862 static int receive_extralen(struct dlm_message *ms)
3864 return (le16_to_cpu(ms->m_header.h_length) -
3865 sizeof(struct dlm_message));
3868 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3869 struct dlm_message *ms)
3873 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3874 if (!lkb->lkb_lvbptr)
3875 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3876 if (!lkb->lkb_lvbptr)
3878 len = receive_extralen(ms);
3879 if (len > ls->ls_lvblen)
3880 len = ls->ls_lvblen;
3881 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3886 static void fake_bastfn(void *astparam, int mode)
3888 log_print("fake_bastfn should not be called");
3891 static void fake_astfn(void *astparam)
3893 log_print("fake_astfn should not be called");
3896 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3897 struct dlm_message *ms)
3899 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
3900 lkb->lkb_ownpid = le32_to_cpu(ms->m_pid);
3901 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
3902 lkb->lkb_grmode = DLM_LOCK_IV;
3903 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
3905 lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL;
3906 lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL;
3908 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3909 /* lkb was just created so there won't be an lvb yet */
3910 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3911 if (!lkb->lkb_lvbptr)
3918 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3919 struct dlm_message *ms)
3921 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3924 if (receive_lvb(ls, lkb, ms))
3927 lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
3928 lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
3933 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3934 struct dlm_message *ms)
3936 if (receive_lvb(ls, lkb, ms))
3941 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3942 uses to send a reply and that the remote end uses to process the reply. */
3944 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3946 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3947 lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
3948 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
3951 /* This is called after the rsb is locked so that we can safely inspect
3952 fields in the lkb. */
3954 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3956 int from = le32_to_cpu(ms->m_header.h_nodeid);
3959 /* currently mixing of user/kernel locks is not supported */
3960 if (ms->m_flags & cpu_to_le32(DLM_IFL_USER) &&
3961 ~lkb->lkb_flags & DLM_IFL_USER) {
3962 log_error(lkb->lkb_resource->res_ls,
3963 "got user dlm message for a kernel lock");
3968 switch (ms->m_type) {
3969 case cpu_to_le32(DLM_MSG_CONVERT):
3970 case cpu_to_le32(DLM_MSG_UNLOCK):
3971 case cpu_to_le32(DLM_MSG_CANCEL):
3972 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3976 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
3977 case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
3978 case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
3979 case cpu_to_le32(DLM_MSG_GRANT):
3980 case cpu_to_le32(DLM_MSG_BAST):
3981 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3985 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
3986 if (!is_process_copy(lkb))
3988 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
3998 log_error(lkb->lkb_resource->res_ls,
3999 "ignore invalid message %d from %d %x %x %x %d",
4000 le32_to_cpu(ms->m_type), from, lkb->lkb_id,
4001 lkb->lkb_remid, lkb->lkb_flags, lkb->lkb_nodeid);
4005 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4007 struct dlm_lkb *lkb;
4010 int error, namelen = 0;
4012 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4014 error = create_lkb(ls, &lkb);
4018 receive_flags(lkb, ms);
4019 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4020 error = receive_request_args(ls, lkb, ms);
4026 /* The dir node is the authority on whether we are the master
4027 for this rsb or not, so if the master sends us a request, we should
4028 recreate the rsb if we've destroyed it. This race happens when we
4029 send a remove message to the dir node at the same time that the dir
4030 node sends us a request for the rsb. */
4032 namelen = receive_extralen(ms);
4034 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4035 R_RECEIVE_REQUEST, &r);
4043 if (r->res_master_nodeid != dlm_our_nodeid()) {
4044 error = validate_master_nodeid(ls, r, from_nodeid);
4054 error = do_request(r, lkb);
4055 send_request_reply(r, lkb, error);
4056 do_request_effects(r, lkb, error);
4061 if (error == -EINPROGRESS)
4068 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4069 and do this receive_request again from process_lookup_list once
4070 we get the lookup reply. This would avoid a many repeated
4071 ENOTBLK request failures when the lookup reply designating us
4072 as master is delayed. */
4074 if (error != -ENOTBLK) {
4075 log_limit(ls, "receive_request %x from %d %d",
4076 le32_to_cpu(ms->m_lkid), from_nodeid, error);
4079 setup_stub_lkb(ls, ms);
4080 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4084 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4086 struct dlm_lkb *lkb;
4088 int error, reply = 1;
4090 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4094 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
4095 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4096 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4097 (unsigned long long)lkb->lkb_recover_seq,
4098 le32_to_cpu(ms->m_header.h_nodeid),
4099 le32_to_cpu(ms->m_lkid));
4105 r = lkb->lkb_resource;
4110 error = validate_message(lkb, ms);
4114 receive_flags(lkb, ms);
4116 error = receive_convert_args(ls, lkb, ms);
4118 send_convert_reply(r, lkb, error);
4122 reply = !down_conversion(lkb);
4124 error = do_convert(r, lkb);
4126 send_convert_reply(r, lkb, error);
4127 do_convert_effects(r, lkb, error);
4135 setup_stub_lkb(ls, ms);
4136 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4140 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4142 struct dlm_lkb *lkb;
4146 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4150 if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
4151 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4152 lkb->lkb_id, lkb->lkb_remid,
4153 le32_to_cpu(ms->m_header.h_nodeid),
4154 le32_to_cpu(ms->m_lkid));
4160 r = lkb->lkb_resource;
4165 error = validate_message(lkb, ms);
4169 receive_flags(lkb, ms);
4171 error = receive_unlock_args(ls, lkb, ms);
4173 send_unlock_reply(r, lkb, error);
4177 error = do_unlock(r, lkb);
4178 send_unlock_reply(r, lkb, error);
4179 do_unlock_effects(r, lkb, error);
4187 setup_stub_lkb(ls, ms);
4188 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4192 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4194 struct dlm_lkb *lkb;
4198 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4202 receive_flags(lkb, ms);
4204 r = lkb->lkb_resource;
4209 error = validate_message(lkb, ms);
4213 error = do_cancel(r, lkb);
4214 send_cancel_reply(r, lkb, error);
4215 do_cancel_effects(r, lkb, error);
4223 setup_stub_lkb(ls, ms);
4224 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4228 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4230 struct dlm_lkb *lkb;
4234 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4238 r = lkb->lkb_resource;
4243 error = validate_message(lkb, ms);
4247 receive_flags_reply(lkb, ms);
4248 if (is_altmode(lkb))
4249 munge_altmode(lkb, ms);
4250 grant_lock_pc(r, lkb, ms);
4251 queue_cast(r, lkb, 0);
4259 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4261 struct dlm_lkb *lkb;
4265 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4269 r = lkb->lkb_resource;
4274 error = validate_message(lkb, ms);
4278 queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode));
4279 lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode);
4287 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4289 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4291 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4292 our_nodeid = dlm_our_nodeid();
4294 len = receive_extralen(ms);
4296 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4299 /* Optimization: we're master so treat lookup as a request */
4300 if (!error && ret_nodeid == our_nodeid) {
4301 receive_request(ls, ms);
4304 send_lookup_reply(ls, ms, ret_nodeid, error);
4307 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4309 char name[DLM_RESNAME_MAXLEN+1];
4312 int rv, len, dir_nodeid, from_nodeid;
4314 from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4316 len = receive_extralen(ms);
4318 if (len > DLM_RESNAME_MAXLEN) {
4319 log_error(ls, "receive_remove from %d bad len %d",
4324 dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash));
4325 if (dir_nodeid != dlm_our_nodeid()) {
4326 log_error(ls, "receive_remove from %d bad nodeid %d",
4327 from_nodeid, dir_nodeid);
4331 /* Look for name on rsbtbl.toss, if it's there, kill it.
4332 If it's on rsbtbl.keep, it's being used, and we should ignore this
4333 message. This is an expected race between the dir node sending a
4334 request to the master node at the same time as the master node sends
4335 a remove to the dir node. The resolution to that race is for the
4336 dir node to ignore the remove message, and the master node to
4337 recreate the master rsb when it gets a request from the dir node for
4338 an rsb it doesn't have. */
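/*
 * Timeline sketch of that race (hypothetical nodes, M = master,
 * D = dir node):
 *   M: rsb unused, sends a remove for it to D
 *   D: concurrently sends a request for the same rsb to M
 *   D: receives the remove, finds the rsb on rsbtbl.keep (in use
 *      again), and ignores the remove
 *   M: receives the request and recreates the rsb it just removed
 */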
4340 memset(name, 0, sizeof(name));
4341 memcpy(name, ms->m_extra, len);
4343 hash = jhash(name, len, 0);
4344 b = hash & (ls->ls_rsbtbl_size - 1);
4346 spin_lock(&ls->ls_rsbtbl[b].lock);
4348 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4350 /* verify the rsb is on keep list per comment above */
4351 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4353 /* should not happen */
4354 log_error(ls, "receive_remove from %d not found %s",
4356 spin_unlock(&ls->ls_rsbtbl[b].lock);
4359 if (r->res_master_nodeid != from_nodeid) {
4360 /* should not happen */
4361 log_error(ls, "receive_remove keep from %d master %d",
4362 from_nodeid, r->res_master_nodeid);
4364 spin_unlock(&ls->ls_rsbtbl[b].lock);
4368 log_debug(ls, "receive_remove from %d master %d first %x %s",
4369 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4371 spin_unlock(&ls->ls_rsbtbl[b].lock);
4375 if (r->res_master_nodeid != from_nodeid) {
4376 log_error(ls, "receive_remove toss from %d master %d",
4377 from_nodeid, r->res_master_nodeid);
4379 spin_unlock(&ls->ls_rsbtbl[b].lock);
4383 if (kref_put(&r->res_ref, kill_rsb)) {
4384 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4385 spin_unlock(&ls->ls_rsbtbl[b].lock);
4388 log_error(ls, "receive_remove from %d rsb ref error",
4391 spin_unlock(&ls->ls_rsbtbl[b].lock);
4395 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4397 do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid));
4400 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4402 struct dlm_lkb *lkb;
4404 int error, mstype, result;
4405 int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
4407 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4411 r = lkb->lkb_resource;
4415 error = validate_message(lkb, ms);
4419 mstype = lkb->lkb_wait_type;
4420 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4422 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4423 lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid),
4424 from_dlm_errno(le32_to_cpu(ms->m_result)));
4429 /* Optimization: the dir node was also the master, so it took our
4430 lookup as a request and sent request reply instead of lookup reply */
4431 if (mstype == DLM_MSG_LOOKUP) {
4432 r->res_master_nodeid = from_nodeid;
4433 r->res_nodeid = from_nodeid;
4434 lkb->lkb_nodeid = from_nodeid;
4437 /* this is the value returned from do_request() on the master */
4438 result = from_dlm_errno(le32_to_cpu(ms->m_result));
4442 /* request would block (be queued) on remote master */
4443 queue_cast(r, lkb, -EAGAIN);
4444 confirm_master(r, -EAGAIN);
4445 unhold_lkb(lkb); /* undoes create_lkb() */
4450 /* request was queued or granted on remote master */
4451 receive_flags_reply(lkb, ms);
4452 lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
4453 if (is_altmode(lkb))
4454 munge_altmode(lkb, ms);
4456 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4459 grant_lock_pc(r, lkb, ms);
4460 queue_cast(r, lkb, 0);
4462 confirm_master(r, result);
4467 /* find_rsb failed to find rsb or rsb wasn't master */
4468 log_limit(ls, "receive_request_reply %x from %d %d "
4469 "master %d dir %d first %x %s", lkb->lkb_id,
4470 from_nodeid, result, r->res_master_nodeid,
4471 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4473 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4474 r->res_master_nodeid != dlm_our_nodeid()) {
4475 /* cause _request_lock->set_master->send_lookup */
4476 r->res_master_nodeid = 0;
4478 lkb->lkb_nodeid = -1;
4481 if (is_overlap(lkb)) {
4482 /* we'll ignore error in cancel/unlock reply */
4483 queue_cast_overlap(r, lkb);
4484 confirm_master(r, result);
4485 unhold_lkb(lkb); /* undoes create_lkb() */
4487 _request_lock(r, lkb);
4489 if (r->res_master_nodeid == dlm_our_nodeid())
4490 confirm_master(r, 0);
4495 log_error(ls, "receive_request_reply %x error %d",
4496 lkb->lkb_id, result);
4499 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4500 log_debug(ls, "receive_request_reply %x result %d unlock",
4501 lkb->lkb_id, result);
4502 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4503 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4504 send_unlock(r, lkb);
4505 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4506 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4507 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4508 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4509 send_cancel(r, lkb);
4511 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4512 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4521 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4522 struct dlm_message *ms)
4524 /* this is the value returned from do_convert() on the master */
4525 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
4527 /* convert would block (be queued) on remote master */
4528 queue_cast(r, lkb, -EAGAIN);
4532 receive_flags_reply(lkb, ms);
4533 revert_lock_pc(r, lkb);
4534 queue_cast(r, lkb, -EDEADLK);
4538 /* convert was queued on remote master */
4539 receive_flags_reply(lkb, ms);
4540 if (is_demoted(lkb))
4543 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4548 /* convert was granted on remote master */
4549 receive_flags_reply(lkb, ms);
4550 if (is_demoted(lkb))
4552 grant_lock_pc(r, lkb, ms);
4553 queue_cast(r, lkb, 0);
4557 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4558 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
4559 le32_to_cpu(ms->m_lkid),
4560 from_dlm_errno(le32_to_cpu(ms->m_result)));
4566 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4568 struct dlm_rsb *r = lkb->lkb_resource;
4574 error = validate_message(lkb, ms);
4578 /* stub reply can happen with waiters_mutex held */
4579 error = remove_from_waiters_ms(lkb, ms);
4583 __receive_convert_reply(r, lkb, ms);
4589 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4591 struct dlm_lkb *lkb;
4594 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4598 _receive_convert_reply(lkb, ms);
4603 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4605 struct dlm_rsb *r = lkb->lkb_resource;
4611 error = validate_message(lkb, ms);
4615 /* stub reply can happen with waiters_mutex held */
4616 error = remove_from_waiters_ms(lkb, ms);
4620 /* this is the value returned from do_unlock() on the master */
4622 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
4624 receive_flags_reply(lkb, ms);
4625 remove_lock_pc(r, lkb);
4626 queue_cast(r, lkb, -DLM_EUNLOCK);
4631 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4632 lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result)));
4639 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4641 struct dlm_lkb *lkb;
4644 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4648 _receive_unlock_reply(lkb, ms);
4653 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4655 struct dlm_rsb *r = lkb->lkb_resource;
4661 error = validate_message(lkb, ms);
4665 /* stub reply can happen with waiters_mutex held */
4666 error = remove_from_waiters_ms(lkb, ms);
4670 /* this is the value returned from do_cancel() on the master */
4672 switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
4674 receive_flags_reply(lkb, ms);
4675 revert_lock_pc(r, lkb);
4676 queue_cast(r, lkb, -DLM_ECANCEL);
4681 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4683 from_dlm_errno(le32_to_cpu(ms->m_result)));
4690 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4692 struct dlm_lkb *lkb;
4695 error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
4699 _receive_cancel_reply(lkb, ms);
4704 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4706 struct dlm_lkb *lkb;
4708 int error, ret_nodeid;
4709 int do_lookup_list = 0;
4711 error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb);
4713 log_error(ls, "%s no lkid %x", __func__,
4714 le32_to_cpu(ms->m_lkid));
4718 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4719 FIXME: will a non-zero error ever be returned? */
4721 r = lkb->lkb_resource;
4725 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4729 ret_nodeid = le32_to_cpu(ms->m_nodeid);
4731 /* We sometimes receive a request from the dir node for this
4732 rsb before we've received the dir node's lookup_reply for it.
4733 The request from the dir node implies we're the master, so we set
4734 ourself as master in receive_request_reply, and verify here that
4735 we are indeed the master. */
4737 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4738 /* This should never happen */
4739 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4740 "master %d dir %d our %d first %x %s",
4741 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
4742 ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
4743 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4746 if (ret_nodeid == dlm_our_nodeid()) {
4747 r->res_master_nodeid = ret_nodeid;
4750 r->res_first_lkid = 0;
4751 } else if (ret_nodeid == -1) {
4752 /* the remote node doesn't believe it's the dir node */
4753 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4754 lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid));
4755 r->res_master_nodeid = 0;
4757 lkb->lkb_nodeid = -1;
4759 /* set_master() will set lkb_nodeid from r */
4760 r->res_master_nodeid = ret_nodeid;
4761 r->res_nodeid = ret_nodeid;
4764 if (is_overlap(lkb)) {
4765 log_debug(ls, "receive_lookup_reply %x unlock %x",
4766 lkb->lkb_id, lkb->lkb_flags);
4767 queue_cast_overlap(r, lkb);
4768 unhold_lkb(lkb); /* undoes create_lkb() */
4772 _request_lock(r, lkb);
4776 process_lookup_list(r);
4783 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4786 int error = 0, noent = 0;
4788 if (!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid))) {
4789 log_limit(ls, "receive %d from non-member %d %x %x %d",
4790 le32_to_cpu(ms->m_type),
4791 le32_to_cpu(ms->m_header.h_nodeid),
4792 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
4793 from_dlm_errno(le32_to_cpu(ms->m_result)));
4797 switch (ms->m_type) {
4799 /* messages sent to a master node */
4801 case cpu_to_le32(DLM_MSG_REQUEST):
4802 error = receive_request(ls, ms);
4805 case cpu_to_le32(DLM_MSG_CONVERT):
4806 error = receive_convert(ls, ms);
4809 case cpu_to_le32(DLM_MSG_UNLOCK):
4810 error = receive_unlock(ls, ms);
4813 case cpu_to_le32(DLM_MSG_CANCEL):
4815 error = receive_cancel(ls, ms);
4818 /* messages sent from a master node (replies to above) */
4820 case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
4821 error = receive_request_reply(ls, ms);
4824 case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
4825 error = receive_convert_reply(ls, ms);
4828 case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
4829 error = receive_unlock_reply(ls, ms);
4832 case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
4833 error = receive_cancel_reply(ls, ms);
4836 /* messages sent from a master node (only two types of async msg) */
4838 case cpu_to_le32(DLM_MSG_GRANT):
4840 error = receive_grant(ls, ms);
4843 case cpu_to_le32(DLM_MSG_BAST):
4845 error = receive_bast(ls, ms);
4848 /* messages sent to a dir node */
4850 case cpu_to_le32(DLM_MSG_LOOKUP):
4851 receive_lookup(ls, ms);
4854 case cpu_to_le32(DLM_MSG_REMOVE):
4855 receive_remove(ls, ms);
4858 /* messages sent from a dir node (remove has no reply) */
4860 case cpu_to_le32(DLM_MSG_LOOKUP_REPLY):
4861 receive_lookup_reply(ls, ms);
4864 /* other messages */
4866 case cpu_to_le32(DLM_MSG_PURGE):
4867 receive_purge(ls, ms);
4871 log_error(ls, "unknown message type %d",
4872 le32_to_cpu(ms->m_type));
4876 * When checking for ENOENT, we're checking the result of
4877 * find_lkb(m_remid):
4879 * The lock id referenced in the message wasn't found. This may
4880 * happen in normal usage for the async messages and cancel, so
4881 * only use log_debug for them.
4883 * Some errors are expected and normal.
4886 if (error == -ENOENT && noent) {
4887 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4888 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
4889 le32_to_cpu(ms->m_header.h_nodeid),
4890 le32_to_cpu(ms->m_lkid), saved_seq);
4891 } else if (error == -ENOENT) {
4892 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4893 le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
4894 le32_to_cpu(ms->m_header.h_nodeid),
4895 le32_to_cpu(ms->m_lkid), saved_seq);
4897 if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT))
4898 dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash));
4901 if (error == -EINVAL) {
4902 log_error(ls, "receive %d inval from %d lkid %x remid %x "
4904 le32_to_cpu(ms->m_type),
4905 le32_to_cpu(ms->m_header.h_nodeid),
4906 le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
4911 /* If the lockspace is in recovery mode (locking stopped), then normal
4912 messages are saved on the requestqueue for processing after recovery is
4913 done. When not in recovery mode, we wait for dlm_recoverd to drain saved
4914 messages off the requestqueue before we process new ones. This occurs right
4915 after recovery completes when we transition from saving all messages on
4916 requestqueue, to processing all the saved messages, to processing new
4917 messages as they arrive. */
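/* For reference, a sketch of dlm_locking_stopped() as assumed here; the
   real definition lives in member.c. It reports whether the LSFL_RUNNING
   bit is clear, i.e. whether the lockspace is in recovery. */

int dlm_locking_stopped(struct dlm_ls *ls)
{
	int ret;

	spin_lock(&ls->ls_recover_lock);
	ret = !test_bit(LSFL_RUNNING, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);
	return ret;
}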
4919 static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4922 if (dlm_locking_stopped(ls)) {
4923 /* If we were a member of this lockspace, left, and rejoined,
4924 other nodes may still be sending us messages from the
4925 lockspace generation before we left. */
4926 if (!ls->ls_generation) {
4927 log_limit(ls, "receive %d from %d ignore old gen",
4928 le32_to_cpu(ms->m_type), nodeid);
4932 dlm_add_requestqueue(ls, nodeid, ms);
4934 dlm_wait_requestqueue(ls);
4935 _receive_message(ls, ms, 0);
4939 /* This is called by dlm_recoverd to process messages that were saved on
4940 the requestqueue. */
4942 void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
4945 _receive_message(ls, ms, saved_seq);
4948 /* This is called by the midcomms layer when something is received for
4949 the lockspace. It could be either a MSG (normal message sent as part of
4950 standard locking activity) or an RCOM (recovery message sent as part of
4951 lockspace recovery). */
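/* The packet layout assumed below (declared in dlm_internal.h): the
   header is common to both variants, so h_cmd can be inspected before
   deciding whether the payload is a message or an rcom. Sketch: */

union dlm_packet {
	struct dlm_header	header;		/* common to other two */
	struct dlm_message	message;
	struct dlm_rcom		rcom;
};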
4953 void dlm_receive_buffer(union dlm_packet *p, int nodeid)
4955 struct dlm_header *hd = &p->header;
4959 switch (hd->h_cmd) {
4961 type = le32_to_cpu(p->message.m_type);
4964 type = le32_to_cpu(p->rcom.rc_type);
4967 log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
4971 if (le32_to_cpu(hd->h_nodeid) != nodeid) {
4972 log_print("invalid h_nodeid %d from %d lockspace %x",
4973 le32_to_cpu(hd->h_nodeid), nodeid,
4974 le32_to_cpu(hd->u.h_lockspace));
4978 ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace));
4980 if (dlm_config.ci_log_debug) {
4981 printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
4982 "%u from %d cmd %d type %d\n",
4983 le32_to_cpu(hd->u.h_lockspace), nodeid,
4987 if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
4988 dlm_send_ls_not_ready(nodeid, &p->rcom);
4992 /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
4993 be inactive (in this ls) before transitioning to recovery mode */
4995 down_read(&ls->ls_recv_active);
4996 if (hd->h_cmd == DLM_MSG)
4997 dlm_receive_message(ls, &p->message, nodeid);
4998 else if (hd->h_cmd == DLM_RCOM)
4999 dlm_receive_rcom(ls, &p->rcom, nodeid);
5001 log_error(ls, "invalid h_cmd %d from %d lockspace %x",
5002 hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace));
5003 up_read(&ls->ls_recv_active);
5005 dlm_put_lockspace(ls);
5008 static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
5009 struct dlm_message *ms_stub)
5011 if (middle_conversion(lkb)) {
5013 memset(ms_stub, 0, sizeof(struct dlm_message));
5014 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
5015 ms_stub->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
5016 ms_stub->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS));
5017 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
5018 _receive_convert_reply(lkb, ms_stub);
5020 /* Same special case as in receive_rcom_lock_args() */
5021 lkb->lkb_grmode = DLM_LOCK_IV;
5022 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
5025 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
5026 lkb->lkb_flags |= DLM_IFL_RESEND;
5029 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
5030 conversions are async; there's no reply from the remote master */
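/* Sketch of middle_conversion(), defined earlier in lock.c. PR and CW
   are the "middle" modes: incompatible with each other but each
   compatible with lower modes, so a conversion between them cannot be
   resolved until all locks on the rsb have been rebuilt (see
   receive_rcom_lock_args() below). */

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode == DLM_LOCK_PR && lkb->lkb_rqmode == DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode == DLM_LOCK_PR && lkb->lkb_grmode == DLM_LOCK_CW))
		return 1;
	return 0;
}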
5033 /* A waiting lkb needs recovery if the master node has failed, or
5034 the master node is changing (only when no directory is used) */
5036 static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
5039 if (dlm_no_directory(ls))
5042 if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
5048 /* Recovery for locks that are waiting for replies from nodes that are now
5049 gone. We can just complete unlocks and cancels by faking a reply from the
5050 dead node. Requests and up-conversions we flag to be resent after
5051 recovery. Down-conversions can just be completed with a fake reply like
5052 unlocks. Conversions between PR and CW need special attention. */
5054 void dlm_recover_waiters_pre(struct dlm_ls *ls)
5056 struct dlm_lkb *lkb, *safe;
5057 struct dlm_message *ms_stub;
5058 int wait_type, stub_unlock_result, stub_cancel_result;
5061 ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
5065 mutex_lock(&ls->ls_waiters_mutex);
5067 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
5069 dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);
5071 /* exclude debug messages about unlocks because there can be so
5072 many and they aren't very interesting */
5074 if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
5075 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5076 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
5080 lkb->lkb_resource->res_nodeid,
5082 lkb->lkb_wait_nodeid,
5086 /* all outstanding lookups, regardless of destination, will be
5087 resent after recovery is done */
5089 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
5090 lkb->lkb_flags |= DLM_IFL_RESEND;
5094 if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
5097 wait_type = lkb->lkb_wait_type;
5098 stub_unlock_result = -DLM_EUNLOCK;
5099 stub_cancel_result = -DLM_ECANCEL;
5101 /* Main reply may have been received leaving a zero wait_type,
5102 but a reply for the overlapping op may not have been
5103 received. In that case we need to fake the appropriate
5104 reply for the overlap op. */
5107 if (is_overlap_cancel(lkb)) {
5108 wait_type = DLM_MSG_CANCEL;
5109 if (lkb->lkb_grmode == DLM_LOCK_IV)
5110 stub_cancel_result = 0;
5112 if (is_overlap_unlock(lkb)) {
5113 wait_type = DLM_MSG_UNLOCK;
5114 if (lkb->lkb_grmode == DLM_LOCK_IV)
5115 stub_unlock_result = -ENOENT;
5118 log_debug(ls, "rwpre overlap %x %x %d %d %d",
5119 lkb->lkb_id, lkb->lkb_flags, wait_type,
5120 stub_cancel_result, stub_unlock_result);
5123 switch (wait_type) {
5125 case DLM_MSG_REQUEST:
5126 lkb->lkb_flags |= DLM_IFL_RESEND;
5129 case DLM_MSG_CONVERT:
5130 recover_convert_waiter(ls, lkb, ms_stub);
5133 case DLM_MSG_UNLOCK:
5135 memset(ms_stub, 0, sizeof(struct dlm_message));
5136 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
5137 ms_stub->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
5138 ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_unlock_result));
5139 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
5140 _receive_unlock_reply(lkb, ms_stub);
5144 case DLM_MSG_CANCEL:
5146 memset(ms_stub, 0, sizeof(struct dlm_message));
5147 ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
5148 ms_stub->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY);
5149 ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_cancel_result));
5150 ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
5151 _receive_cancel_reply(lkb, ms_stub);
5156 log_error(ls, "invalid lkb wait_type %d %d",
5157 lkb->lkb_wait_type, wait_type);
5161 mutex_unlock(&ls->ls_waiters_mutex);
5165 static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
5167 struct dlm_lkb *lkb = NULL, *iter;
5169 mutex_lock(&ls->ls_waiters_mutex);
5170 list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
5171 if (iter->lkb_flags & DLM_IFL_RESEND) {
5177 mutex_unlock(&ls->ls_waiters_mutex);
5182 /* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
5183 master or dir-node for r. Processing the lkb may result in it being placed back on the waiters list. */
5186 /* We do this after normal locking has been enabled and any saved messages
5187 (in requestqueue) have been processed. We should be confident that at
5188 this point we won't get or process a reply to any of these waiting
5189 operations. But, new ops may be coming in on the rsbs/locks here from
5190 userspace or remotely. */
5192 /* there may have been an overlap unlock/cancel prior to recovery or after
5193 recovery. if before, the lkb may still have a positive wait_count; if after,
5194 the overlap flag would just have been set and nothing new sent. we can be
5195 confident here that any replies to either the initial op or overlap ops
5196 prior to recovery have been received. */
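/* Sketch of the overlap predicates used below (defined earlier in
   lock.c): each tests the flag recorded when an unlock or cancel
   overlapped an op that was still waiting for a reply. */

static int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK);
}

static int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL);
}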
5198 int dlm_recover_waiters_post(struct dlm_ls *ls)
5200 struct dlm_lkb *lkb;
5202 int error = 0, mstype, err, oc, ou;
5205 if (dlm_locking_stopped(ls)) {
5206 log_debug(ls, "recover_waiters_post aborted");
5211 lkb = find_resend_waiter(ls);
5215 r = lkb->lkb_resource;
5219 mstype = lkb->lkb_wait_type;
5220 oc = is_overlap_cancel(lkb);
5221 ou = is_overlap_unlock(lkb);
5224 log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
5225 "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
5226 "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
5227 r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
5228 dlm_dir_nodeid(r), oc, ou);
5230 /* At this point we assume that we won't get a reply to any
5231 previous op or overlap op on this lock. First, do a big
5232 remove_from_waiters() for all previous ops. */
5234 lkb->lkb_flags &= ~DLM_IFL_RESEND;
5235 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
5236 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
5237 lkb->lkb_wait_type = 0;
5238 /* drop all wait_count references; we still
5239 * hold a reference for this iteration.
5241 while (lkb->lkb_wait_count) {
5242 lkb->lkb_wait_count--;
5245 mutex_lock(&ls->ls_waiters_mutex);
5246 list_del_init(&lkb->lkb_wait_reply);
5247 mutex_unlock(&ls->ls_waiters_mutex);
5250 /* do an unlock or cancel instead of resending */
5252 case DLM_MSG_LOOKUP:
5253 case DLM_MSG_REQUEST:
5254 queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
5256 unhold_lkb(lkb); /* undoes create_lkb() */
5258 case DLM_MSG_CONVERT:
5260 queue_cast(r, lkb, -DLM_ECANCEL);
5262 lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
5263 _unlock_lock(r, lkb);
5271 case DLM_MSG_LOOKUP:
5272 case DLM_MSG_REQUEST:
5273 _request_lock(r, lkb);
5275 confirm_master(r, 0);
5277 case DLM_MSG_CONVERT:
5278 _convert_lock(r, lkb);
5286 log_error(ls, "waiter %x msg %d r_nodeid %d "
5287 "dir_nodeid %d overlap %d %d",
5288 lkb->lkb_id, mstype, r->res_nodeid,
5289 dlm_dir_nodeid(r), oc, ou);
5299 static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
5300 struct list_head *list)
5302 struct dlm_lkb *lkb, *safe;
5304 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5305 if (!is_master_copy(lkb))
5308 /* don't purge lkbs we've added in recover_master_copy for
5309 the current recovery seq */
5311 if (lkb->lkb_recover_seq == ls->ls_recover_seq)
5316 /* this put should free the lkb */
5317 if (!dlm_put_lkb(lkb))
5318 log_error(ls, "purged mstcpy lkb not released");
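/* Sketch of is_master_copy(), defined earlier in lock.c: an lkb is a
   master copy (MSTCPY) when it mirrors, on the master node, a lock
   whose process copy lives on another node. */

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}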
5322 void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
5324 struct dlm_ls *ls = r->res_ls;
5326 purge_mstcpy_list(ls, r, &r->res_grantqueue);
5327 purge_mstcpy_list(ls, r, &r->res_convertqueue);
5328 purge_mstcpy_list(ls, r, &r->res_waitqueue);
5331 static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
5332 struct list_head *list,
5333 int nodeid_gone, unsigned int *count)
5335 struct dlm_lkb *lkb, *safe;
5337 list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
5338 if (!is_master_copy(lkb))
5341 if ((lkb->lkb_nodeid == nodeid_gone) ||
5342 dlm_is_removed(ls, lkb->lkb_nodeid)) {
5344 /* tell recover_lvb to invalidate the lvb
5345 because a node holding EX/PW failed */
5346 if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
5347 (lkb->lkb_grmode >= DLM_LOCK_PW)) {
5348 rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
5353 /* this put should free the lkb */
5354 if (!dlm_put_lkb(lkb))
5355 log_error(ls, "purged dead lkb not released");
5357 rsb_set_flag(r, RSB_RECOVER_GRANT);
5364 /* Get rid of locks held by nodes that are gone. */
5366 void dlm_recover_purge(struct dlm_ls *ls)
5369 struct dlm_member *memb;
5370 int nodes_count = 0;
5371 int nodeid_gone = 0;
5372 unsigned int lkb_count = 0;
5374 /* cache one removed nodeid to optimize the common
5375 case of a single node removed */
5377 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
5379 nodeid_gone = memb->nodeid;
5385 down_write(&ls->ls_root_sem);
5386 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
5390 purge_dead_list(ls, r, &r->res_grantqueue,
5391 nodeid_gone, &lkb_count);
5392 purge_dead_list(ls, r, &r->res_convertqueue,
5393 nodeid_gone, &lkb_count);
5394 purge_dead_list(ls, r, &r->res_waitqueue,
5395 nodeid_gone, &lkb_count);
5401 up_write(&ls->ls_root_sem);
5404 log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
5405 lkb_count, nodes_count);
5408 static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
5413 spin_lock(&ls->ls_rsbtbl[bucket].lock);
5414 for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
5415 r = rb_entry(n, struct dlm_rsb, res_hashnode);
5417 if (!rsb_flag(r, RSB_RECOVER_GRANT))
5419 if (!is_master(r)) {
5420 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5424 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5427 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
5432 * Attempt to grant locks on resources that we are the master of.
5433 * Locks may have become grantable during recovery because locks
5434 * from departed nodes have been purged (or not rebuilt), allowing
5435 * previously blocked locks to now be granted. The subset of rsb's
5436 * we are interested in are those with lkb's on either the convert or
5439 * Simplest would be to go through each master rsb and check for non-empty
5440 * convert or waiting queues, and attempt to grant on those rsbs.
5441 * Checking the queues requires lock_rsb, though, for which we'd need
5442 * to release the rsbtbl lock. This would make iterating through all
5443 * rsb's very inefficient. So, we rely on earlier recovery routines
5444 * to set RECOVER_GRANT on any rsb's that we should attempt to grant locks on. */
5448 void dlm_recover_grant(struct dlm_ls *ls)
5452 unsigned int count = 0;
5453 unsigned int rsb_count = 0;
5454 unsigned int lkb_count = 0;
5457 r = find_grant_rsb(ls, bucket);
5459 if (bucket == ls->ls_rsbtbl_size - 1)
5467 /* the RECOVER_GRANT flag is checked in the grant path */
5468 grant_pending_locks(r, &count);
5469 rsb_clear_flag(r, RSB_RECOVER_GRANT);
5471 confirm_master(r, 0);
5478 log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
5479 lkb_count, rsb_count);
5482 static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
5485 struct dlm_lkb *lkb;
5487 list_for_each_entry(lkb, head, lkb_statequeue) {
5488 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
5494 static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
5497 struct dlm_lkb *lkb;
5499 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
5502 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
5505 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
5511 /* needs at least dlm_rcom + rcom_lock */
5512 static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
5513 struct dlm_rsb *r, struct dlm_rcom *rc)
5515 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5517 lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
5518 lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
5519 lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
5520 lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
5521 lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
5522 lkb->lkb_flags |= DLM_IFL_MSTCPY;
5523 lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
5524 lkb->lkb_rqmode = rl->rl_rqmode;
5525 lkb->lkb_grmode = rl->rl_grmode;
5526 /* don't set lkb_status because add_lkb wants to set it itself */
5528 lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
5529 lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
5531 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
5532 int lvblen = le16_to_cpu(rc->rc_header.h_length) -
5533 sizeof(struct dlm_rcom) - sizeof(struct rcom_lock);
5534 if (lvblen > ls->ls_lvblen)
5536 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
5537 if (!lkb->lkb_lvbptr)
5539 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
5542 /* Conversions between PR and CW (middle modes) need special handling.
5543 The real granted mode of these converting locks cannot be determined
5544 until all locks have been rebuilt on the rsb (recover_conversion) */
5546 if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
5547 middle_conversion(lkb)) {
5548 rl->rl_status = DLM_LKSTS_CONVERT;
5549 lkb->lkb_grmode = DLM_LOCK_IV;
5550 rsb_set_flag(r, RSB_RECOVER_CONVERT);
5556 /* This lkb may have been recovered in a previous aborted recovery so we need
5557 to check if the rsb already has an lkb with the given remote nodeid/lkid.
5558 If so we just send back a standard reply. If not, we create a new lkb with
5559 the given values and send back our lkid. We send back our lkid by sending
5560 back the rcom_lock struct we got but with the remid field filled in. */
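/* A partial sketch of struct rcom_lock (declared in dlm_internal.h),
   limited to the fields referenced in this file; the __le types match
   the le32_to_cpu()/le16_to_cpu() conversions applied above and below.
   Consult dlm_internal.h for the authoritative layout. */

struct rcom_lock {
	__le32		rl_ownpid;
	__le32		rl_lkid;
	__le32		rl_remid;
	__le32		rl_parent_lkid;
	__le32		rl_exflags;
	__le32		rl_flags;
	__le32		rl_lvbseq;
	__le32		rl_result;
	int8_t		rl_rqmode;
	int8_t		rl_grmode;
	int8_t		rl_status;
	int8_t		rl_asts;
	__le16		rl_wait_type;
	__le16		rl_namelen;
	char		rl_name[DLM_RESNAME_MAXLEN];
	char		rl_lvb[];
};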
5562 /* needs at least dlm_rcom + rcom_lock */
5563 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5565 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5567 struct dlm_lkb *lkb;
5569 int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
5572 if (rl->rl_parent_lkid) {
5573 error = -EOPNOTSUPP;
5577 remid = le32_to_cpu(rl->rl_lkid);
5579 /* In general we expect the rsb returned to be R_MASTER, but we don't
5580 have to require it. Recovery of masters on one node can overlap
5581 recovery of locks on another node, so one node can send us MSTCPY
5582 locks before we've made ourselves master of this rsb. We can still
5583 add new MSTCPY locks that we receive here without any harm; when
5584 we make ourselves master, dlm_recover_masters() won't touch the
5585 MSTCPY locks we've received early. */
5587 error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
5588 from_nodeid, R_RECEIVE_RECOVER, &r);
5594 if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
5595 log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
5596 from_nodeid, remid);
5601 lkb = search_remid(r, from_nodeid, remid);
5607 error = create_lkb(ls, &lkb);
5611 error = receive_rcom_lock_args(ls, lkb, r, rc);
5618 add_lkb(r, lkb, rl->rl_status);
5619 ls->ls_recover_locks_in++;
5621 if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
5622 rsb_set_flag(r, RSB_RECOVER_GRANT);
5625 /* this is the new value returned to the lock holder for
5626 saving in its process-copy lkb */
5627 rl->rl_remid = cpu_to_le32(lkb->lkb_id);
5629 lkb->lkb_recover_seq = ls->ls_recover_seq;
5635 if (error && error != -EEXIST)
5636 log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
5637 from_nodeid, remid, error);
5638 rl->rl_result = cpu_to_le32(error);
5642 /* needs at least dlm_rcom + rcom_lock */
5643 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
5645 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
5647 struct dlm_lkb *lkb;
5648 uint32_t lkid, remid;
5651 lkid = le32_to_cpu(rl->rl_lkid);
5652 remid = le32_to_cpu(rl->rl_remid);
5653 result = le32_to_cpu(rl->rl_result);
5655 error = find_lkb(ls, lkid, &lkb);
5657 log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
5658 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5663 r = lkb->lkb_resource;
5667 if (!is_process_copy(lkb)) {
5668 log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
5669 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5680 /* There's a chance the new master received our lock before
5681 dlm_recover_master_reply(); this wouldn't happen if we did
5682 a barrier between recover_masters and recover_locks. */
5684 log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
5685 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5688 dlm_send_rcom_lock(r, lkb);
5692 lkb->lkb_remid = remid;
5695 log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
5696 lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
5700 /* an ack for dlm_recover_locks() which waits for replies from
5701 all the locks it sends to new masters */
5702 dlm_recovered_lock(r);
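/* Sketch of is_process_copy(), defined earlier in lock.c: a process
   copy is the local lkb for a lock mastered remotely, i.e. it has a
   nonzero lkb_nodeid and is not itself a master copy. */

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}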
5711 #ifdef CONFIG_DLM_DEPRECATED_API
5712 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5713 int mode, uint32_t flags, void *name, unsigned int namelen,
5714 unsigned long timeout_cs)
5716 int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
5717 int mode, uint32_t flags, void *name, unsigned int namelen)
5720 struct dlm_lkb *lkb;
5721 struct dlm_args args;
5725 dlm_lock_recovery(ls);
5727 error = create_lkb(ls, &lkb);
5733 trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags);
5735 if (flags & DLM_LKF_VALBLK) {
5736 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5737 if (!ua->lksb.sb_lvbptr) {
5743 #ifdef CONFIG_DLM_DEPRECATED_API
5744 error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
5745 fake_astfn, ua, fake_bastfn, &args);
5747 error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua,
5748 fake_bastfn, &args);
5751 kfree(ua->lksb.sb_lvbptr);
5752 ua->lksb.sb_lvbptr = NULL;
5757 /* After ua is attached to lkb it will be freed by dlm_free_lkb().
5758 When DLM_IFL_USER is set, the dlm knows that this is a userspace
5759 lock and that lkb_astparam is the dlm_user_args structure. */
5760 lkb->lkb_flags |= DLM_IFL_USER;
5761 error = request_lock(ls, lkb, name, namelen, &args);
5776 /* add this new lkb to the per-process list of locks */
5777 spin_lock(&ua->proc->locks_spin);
5779 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5780 spin_unlock(&ua->proc->locks_spin);
5783 trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false);
5787 dlm_unlock_recovery(ls);
5791 #ifdef CONFIG_DLM_DEPRECATED_API
5792 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5793 int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
5794 unsigned long timeout_cs)
5796 int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5797 int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
5800 struct dlm_lkb *lkb;
5801 struct dlm_args args;
5802 struct dlm_user_args *ua;
5805 dlm_lock_recovery(ls);
5807 error = find_lkb(ls, lkid, &lkb);
5811 trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags);
5813 /* user can change the params on its lock when it converts it, or
5814 add an lvb that didn't exist before */
5818 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
5819 ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
5820 if (!ua->lksb.sb_lvbptr) {
5825 if (lvb_in && ua->lksb.sb_lvbptr)
5826 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5828 ua->xid = ua_tmp->xid;
5829 ua->castparam = ua_tmp->castparam;
5830 ua->castaddr = ua_tmp->castaddr;
5831 ua->bastparam = ua_tmp->bastparam;
5832 ua->bastaddr = ua_tmp->bastaddr;
5833 ua->user_lksb = ua_tmp->user_lksb;
5835 #ifdef CONFIG_DLM_DEPRECATED_API
5836 error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
5837 fake_astfn, ua, fake_bastfn, &args);
5839 error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua,
5840 fake_bastfn, &args);
5845 error = convert_lock(ls, lkb, &args);
5847 if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
5850 trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false);
5853 dlm_unlock_recovery(ls);
5859 * The caller asks for an orphan lock on a given resource with a given mode.
5860 * If a matching lock exists, it's moved to the owner's list of locks and
5861 * the lkid is returned.
5864 int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5865 int mode, uint32_t flags, void *name, unsigned int namelen,
5868 struct dlm_lkb *lkb = NULL, *iter;
5869 struct dlm_user_args *ua;
5870 int found_other_mode = 0;
5873 mutex_lock(&ls->ls_orphans_mutex);
5874 list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
5875 if (iter->lkb_resource->res_length != namelen)
5877 if (memcmp(iter->lkb_resource->res_name, name, namelen))
5879 if (iter->lkb_grmode != mode) {
5880 found_other_mode = 1;
5885 list_del_init(&iter->lkb_ownqueue);
5886 iter->lkb_flags &= ~DLM_IFL_ORPHAN;
5887 *lkid = iter->lkb_id;
5890 mutex_unlock(&ls->ls_orphans_mutex);
5892 if (!lkb && found_other_mode) {
5902 lkb->lkb_exflags = flags;
5903 lkb->lkb_ownpid = (int) current->pid;
5907 ua->proc = ua_tmp->proc;
5908 ua->xid = ua_tmp->xid;
5909 ua->castparam = ua_tmp->castparam;
5910 ua->castaddr = ua_tmp->castaddr;
5911 ua->bastparam = ua_tmp->bastparam;
5912 ua->bastaddr = ua_tmp->bastaddr;
5913 ua->user_lksb = ua_tmp->user_lksb;
5916 * The lkb reference from the ls_orphans list was not
5917 * removed above, and is now considered the reference
5918 * for the proc locks list.
5921 spin_lock(&ua->proc->locks_spin);
5922 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
5923 spin_unlock(&ua->proc->locks_spin);
5929 int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5930 uint32_t flags, uint32_t lkid, char *lvb_in)
5932 struct dlm_lkb *lkb;
5933 struct dlm_args args;
5934 struct dlm_user_args *ua;
5937 dlm_lock_recovery(ls);
5939 error = find_lkb(ls, lkid, &lkb);
5943 trace_dlm_unlock_start(ls, lkb, flags);
5947 if (lvb_in && ua->lksb.sb_lvbptr)
5948 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
5949 if (ua_tmp->castparam)
5950 ua->castparam = ua_tmp->castparam;
5951 ua->user_lksb = ua_tmp->user_lksb;
5953 error = set_unlock_args(flags, ua, &args);
5957 error = unlock_lock(ls, lkb, &args);
5959 if (error == -DLM_EUNLOCK)
5961 /* from validate_unlock_args() */
5962 if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
5967 spin_lock(&ua->proc->locks_spin);
5968 /* dlm_user_add_cb() may have already taken lkb off the proc list */
5969 if (!list_empty(&lkb->lkb_ownqueue))
5970 list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
5971 spin_unlock(&ua->proc->locks_spin);
5973 trace_dlm_unlock_end(ls, lkb, flags, error);
5976 dlm_unlock_recovery(ls);
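/* The unlock/cancel paths above and below all funnel through
   set_unlock_args(), defined earlier in lock.c. A sketch of the
   validation it is assumed to perform (the exact flag set may differ). */

static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
{
	/* only unlock/cancel-related flags are meaningful here */
	if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
		      DLM_LKF_FORCEUNLOCK))
		return -EINVAL;

	/* cancel and force-unlock are mutually exclusive */
	if ((flags & DLM_LKF_CANCEL) && (flags & DLM_LKF_FORCEUNLOCK))
		return -EINVAL;

	args->flags = flags;
	args->astparam = astarg;
	return 0;
}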
5981 int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
5982 uint32_t flags, uint32_t lkid)
5984 struct dlm_lkb *lkb;
5985 struct dlm_args args;
5986 struct dlm_user_args *ua;
5989 dlm_lock_recovery(ls);
5991 error = find_lkb(ls, lkid, &lkb);
5995 trace_dlm_unlock_start(ls, lkb, flags);
5998 if (ua_tmp->castparam)
5999 ua->castparam = ua_tmp->castparam;
6000 ua->user_lksb = ua_tmp->user_lksb;
6002 error = set_unlock_args(flags, ua, &args);
6006 error = cancel_lock(ls, lkb, &args);
6008 if (error == -DLM_ECANCEL)
6010 /* from validate_unlock_args() */
6011 if (error == -EBUSY)
6014 trace_dlm_unlock_end(ls, lkb, flags, error);
6017 dlm_unlock_recovery(ls);
6022 int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
6024 struct dlm_lkb *lkb;
6025 struct dlm_args args;
6026 struct dlm_user_args *ua;
6030 dlm_lock_recovery(ls);
6032 error = find_lkb(ls, lkid, &lkb);
6036 trace_dlm_unlock_start(ls, lkb, flags);
6040 error = set_unlock_args(flags, ua, &args);
6044 /* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
6046 r = lkb->lkb_resource;
6050 error = validate_unlock_args(lkb, &args);
6053 lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
6055 error = _cancel_lock(r, lkb);
6060 if (error == -DLM_ECANCEL)
6062 /* from validate_unlock_args() */
6063 if (error == -EBUSY)
6066 trace_dlm_unlock_end(ls, lkb, flags, error);
6069 dlm_unlock_recovery(ls);
6073 /* lkb's that are removed from the waiters list by revert are just left on the
6074 orphans list with the granted orphan locks, to be freed by purge */
6076 static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6078 struct dlm_args args;
6081 hold_lkb(lkb); /* reference for the ls_orphans list */
6082 mutex_lock(&ls->ls_orphans_mutex);
6083 list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
6084 mutex_unlock(&ls->ls_orphans_mutex);
6086 set_unlock_args(0, lkb->lkb_ua, &args);
6088 error = cancel_lock(ls, lkb, &args);
6089 if (error == -DLM_ECANCEL)
6094 /* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
6095 granted. Regardless of what rsb queue the lock is on, it's removed and
6096 freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
6097 if our lock is PW/EX (it's ignored if our granted mode is smaller.) */
6099 static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
6101 struct dlm_args args;
6104 set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
6105 lkb->lkb_ua, &args);
6107 error = unlock_lock(ls, lkb, &args);
6108 if (error == -DLM_EUNLOCK)
6113 /* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
6114 (which does lock_rsb) due to deadlock with receiving a message that does
6115 lock_rsb followed by dlm_user_add_cb() */
6117 static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
6118 struct dlm_user_proc *proc)
6120 struct dlm_lkb *lkb = NULL;
6122 spin_lock(&ls->ls_clear_proc_locks);
6123 if (list_empty(&proc->locks))
6126 lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
6127 list_del_init(&lkb->lkb_ownqueue);
6129 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6130 lkb->lkb_flags |= DLM_IFL_ORPHAN;
6132 lkb->lkb_flags |= DLM_IFL_DEAD;
6134 spin_unlock(&ls->ls_clear_proc_locks);
6138 /* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
6139 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
6140 which we clear here. */
6142 /* proc CLOSING flag is set so no more device_reads should look at proc->asts
6143 list, and no more device_writes should add lkb's to proc->locks list; so we
6144 shouldn't need to take asts_spin or locks_spin here. this assumes that
6145 device reads/writes/closes are serialized -- FIXME: we may need to serialize them ourselves. */
6148 void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6150 struct dlm_lkb *lkb, *safe;
6152 dlm_lock_recovery(ls);
6155 lkb = del_proc_lock(ls, proc);
6159 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
6160 orphan_proc_lock(ls, lkb);
6162 unlock_proc_lock(ls, lkb);
6164 /* this removes the reference for the proc->locks list
6165 added by dlm_user_request; it may result in the lkb being freed */
6171 spin_lock(&ls->ls_clear_proc_locks);
6173 /* in-progress unlocks */
6174 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6175 list_del_init(&lkb->lkb_ownqueue);
6176 lkb->lkb_flags |= DLM_IFL_DEAD;
6180 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6181 dlm_purge_lkb_callbacks(lkb);
6182 list_del_init(&lkb->lkb_cb_list);
6186 spin_unlock(&ls->ls_clear_proc_locks);
6187 dlm_unlock_recovery(ls);
6190 static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
6192 struct dlm_lkb *lkb, *safe;
6196 spin_lock(&proc->locks_spin);
6197 if (!list_empty(&proc->locks)) {
6198 lkb = list_entry(proc->locks.next, struct dlm_lkb,
6200 list_del_init(&lkb->lkb_ownqueue);
6202 spin_unlock(&proc->locks_spin);
6207 lkb->lkb_flags |= DLM_IFL_DEAD;
6208 unlock_proc_lock(ls, lkb);
6209 dlm_put_lkb(lkb); /* ref from proc->locks list */
6212 spin_lock(&proc->locks_spin);
6213 list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
6214 list_del_init(&lkb->lkb_ownqueue);
6215 lkb->lkb_flags |= DLM_IFL_DEAD;
6218 spin_unlock(&proc->locks_spin);
6220 spin_lock(&proc->asts_spin);
6221 list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
6222 dlm_purge_lkb_callbacks(lkb);
6223 list_del_init(&lkb->lkb_cb_list);
6226 spin_unlock(&proc->asts_spin);
6229 /* pid of 0 means purge all orphans */
6231 static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
6233 struct dlm_lkb *lkb, *safe;
6235 mutex_lock(&ls->ls_orphans_mutex);
6236 list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
6237 if (pid && lkb->lkb_ownpid != pid)
6239 unlock_proc_lock(ls, lkb);
6240 list_del_init(&lkb->lkb_ownqueue);
6243 mutex_unlock(&ls->ls_orphans_mutex);
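/* The receiving side of DLM_MSG_PURGE (receive_purge(), dispatched from
   _receive_message() above) is assumed to just unpack the nodeid/pid
   pair that send_purge() below packs, and call do_purge(). Sketch: */

static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
{
	do_purge(ls, le32_to_cpu(ms->m_nodeid),
		 le32_to_cpu(ms->m_pid));
}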
6246 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
6248 struct dlm_message *ms;
6249 struct dlm_mhandle *mh;
6252 error = _create_message(ls, sizeof(struct dlm_message), nodeid,
6253 DLM_MSG_PURGE, &ms, &mh, GFP_NOFS);
6256 ms->m_nodeid = cpu_to_le32(nodeid);
6257 ms->m_pid = cpu_to_le32(pid);
6259 return send_message(mh, ms, NULL, 0);
6262 int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
6263 int nodeid, int pid)
6267 if (nodeid && (nodeid != dlm_our_nodeid())) {
6268 error = send_purge(ls, nodeid, pid);
6270 dlm_lock_recovery(ls);
6271 if (pid == current->pid)
6272 purge_proc_locks(ls, proc);
6274 do_purge(ls, nodeid, pid);
6275 dlm_unlock_recovery(ls);
6280 /* debug functionality */
6281 int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len,
6282 int lkb_nodeid, unsigned int lkb_flags, int lkb_status)
6284 struct dlm_lksb *lksb;
6285 struct dlm_lkb *lkb;
6289 /* we currently can't set a valid user lock */
6290 if (lkb_flags & DLM_IFL_USER)
6293 lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
6297 error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1);
6303 lkb->lkb_flags = lkb_flags;
6304 lkb->lkb_nodeid = lkb_nodeid;
6305 lkb->lkb_lksb = lksb;
6306 /* user specific pointer, just don't have it NULL for kernel locks */
6307 if (~lkb_flags & DLM_IFL_USER)
6308 lkb->lkb_astparam = (void *)0xDEADBEEF;
6310 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
6319 add_lkb(r, lkb, lkb_status);
6326 int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id,
6327 int mstype, int to_nodeid)
6329 struct dlm_lkb *lkb;
6332 error = find_lkb(ls, lkb_id, &lkb);
6336 error = add_to_waiters(lkb, mstype, to_nodeid);