[Cluster-devel] [PATCH dlm-tool 13/14] dlm_controld: plock log lock state
Andreas Gruenbacher
agruenba at redhat.com
Fri Mar 3 13:38:45 UTC 2023
Alex,
can you please prefix this patch with the following to make this easier
to read?
Thanks,
Andreas
--
dlm_controld: pass lockspace and lock number to add_lock()
The next patch will make use of the additional arguments.
---
dlm_controld/plock.c | 41 ++++++++++++++++++++++-------------------
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/dlm_controld/plock.c b/dlm_controld/plock.c
index b93863f7..6709d205 100644
--- a/dlm_controld/plock.c
+++ b/dlm_controld/plock.c
@@ -466,8 +466,9 @@ static int is_conflict(struct resource *r, struct dlm_plock_info *in, int get)
return 0;
}
-static int add_lock(struct resource *r, uint32_t nodeid, uint64_t owner,
- uint32_t pid, int ex, uint64_t start, uint64_t end)
+static int add_lock(const struct lockspace *ls, struct resource *r,
+ uint32_t nodeid, uint64_t owner, uint32_t pid,
+ int ex, uint64_t start, uint64_t end, uint64_t number)
{
struct posix_lock *po;
@@ -491,8 +492,8 @@ static int add_lock(struct resource *r, uint32_t nodeid, uint64_t owner,
1. add new lock for non-overlap area of RE, orig mode
2. convert RE to RN range and mode */
-static int lock_case1(struct posix_lock *po, struct resource *r,
- struct dlm_plock_info *in)
+static int lock_case1(const struct lockspace *ls, struct posix_lock *po,
+ struct resource *r, struct dlm_plock_info *in)
{
uint64_t start2, end2;
int rv;
@@ -508,7 +509,8 @@ static int lock_case1(struct posix_lock *po, struct resource *r,
po->end = in->end;
po->ex = in->ex;
- rv = add_lock(r, in->nodeid, in->owner, in->pid, !in->ex, start2, end2);
+ rv = add_lock(ls, r, in->nodeid, in->owner, in->pid, !in->ex, start2,
+ end2, in->number);
out:
return rv;
}
@@ -518,19 +520,20 @@ static int lock_case1(struct posix_lock *po, struct resource *r,
2. add new lock for back fragment, orig mode
3. convert RE to RN range and mode */
-static int lock_case2(struct posix_lock *po, struct resource *r,
- struct dlm_plock_info *in)
+static int lock_case2(const struct lockspace *ls, struct posix_lock *po,
+ struct resource *r, struct dlm_plock_info *in)
{
int rv;
- rv = add_lock(r, in->nodeid, in->owner, in->pid,
- !in->ex, po->start, in->start - 1);
+ rv = add_lock(ls, r, in->nodeid, in->owner, in->pid,
+ !in->ex, po->start, in->start - 1,
+ in->number);
if (rv)
goto out;
- rv = add_lock(r, in->nodeid, in->owner, in->pid,
- !in->ex, in->end + 1, po->end);
+ rv = add_lock(ls, r, in->nodeid, in->owner, in->pid,
+ !in->ex, in->end + 1, po->end, in->number);
if (rv)
goto out;
@@ -569,14 +572,14 @@ static int lock_internal(struct lockspace *ls, struct resource *r,
if (po->ex == in->ex)
goto out;
- rv = lock_case1(po, r, in);
+ rv = lock_case1(ls, po, r, in);
goto out;
case 2:
if (po->ex == in->ex)
goto out;
- rv = lock_case2(po, r, in);
+ rv = lock_case2(ls, po, r, in);
goto out;
case 3:
@@ -597,8 +600,8 @@ static int lock_internal(struct lockspace *ls, struct resource *r,
}
}
- rv = add_lock(r, in->nodeid, in->owner, in->pid,
- in->ex, in->start, in->end);
+ rv = add_lock(ls, r, in->nodeid, in->owner, in->pid,
+ in->ex, in->start, in->end, in->number);
out:
return rv;
@@ -638,8 +641,8 @@ static int unlock_internal(struct lockspace *ls, struct resource *r,
/* RN within RE - shrink and update RE to be front
* fragment, and add a new lock for back fragment */
- rv = add_lock(r, in->nodeid, in->owner, in->pid,
- po->ex, in->end + 1, po->end);
+ rv = add_lock(ls, r, in->nodeid, in->owner, in->pid,
+ po->ex, in->end + 1, po->end, in->number);
po->end = in->start - 1;
goto out;
@@ -1346,8 +1349,8 @@ static void _receive_sync(struct lockspace *ls, struct dlm_header *hd, int len)
}
if (hd->type == DLM_MSG_PLOCK_SYNC_LOCK)
- add_lock(r, info.nodeid, info.owner, info.pid, info.ex,
- info.start, info.end);
+ add_lock(ls, r, info.nodeid, info.owner, info.pid, info.ex,
+ info.start, info.end, info.number);
else if (hd->type == DLM_MSG_PLOCK_SYNC_WAITER)
add_waiter(ls, r, &info);
}
--
2.39.0
More information about the Cluster-devel
mailing list