[Cluster-devel] cluster/cmirror-kernel/src dm-cmirror-client.c ...
jbrassow at sourceware.org
jbrassow at sourceware.org
Mon Feb 26 17:38:08 UTC 2007
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: jbrassow at sourceware.org 2007-02-26 17:38:06
Modified files:
cmirror-kernel/src: dm-cmirror-client.c dm-cmirror-server.c
Log message:
add locking around the log list. There was a small window of opportunity
for the log server to look up a log in the list while another entry was
being deleted (bad for the server).
Bug 229715: cmirror panic in dm_cmirror:cluster_log_serverd
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/src/dm-cmirror-client.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1.2.38&r2=1.1.2.39
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/src/dm-cmirror-server.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.1.2.24&r2=1.1.2.25
--- cluster/cmirror-kernel/src/Attic/dm-cmirror-client.c 2007/02/20 19:35:10 1.1.2.38
+++ cluster/cmirror-kernel/src/Attic/dm-cmirror-client.c 2007/02/26 17:38:06 1.1.2.39
@@ -28,6 +28,7 @@
#include "dm-cmirror-server.h"
#include "dm-cmirror-cman.h"
+spinlock_t log_list_lock;
LIST_HEAD(log_list_head);
struct region_state {
@@ -635,6 +636,7 @@
atomic_set(&lc->in_sync, -1);
lc->uuid_ref = 1;
+ spin_lock(&log_list_lock);
list_for_each_entry(tmp_lc, &log_list_head, log_list){
if(!strncmp(tmp_lc->uuid, lc->uuid, MAX_NAME_LEN)){
lc->uuid_ref = (lc->uuid_ref > tmp_lc->uuid_ref) ?
@@ -647,6 +649,7 @@
lc->uuid_ref);
list_add(&lc->log_list, &log_list_head);
+ spin_unlock(&log_list_lock);
INIT_LIST_HEAD(&lc->region_users);
lc->server_id = 0xDEAD;
@@ -757,7 +760,9 @@
if (!list_empty(&clear_region_list))
DMINFO("Leaving while clear region requests remain.");
+ spin_lock(&log_list_lock);
list_del_init(&lc->log_list);
+ spin_unlock(&log_list_lock);
if ((lc->server_id == my_id) && !atomic_read(&lc->suspended))
consult_server(lc, 0, LRT_MASTER_LEAVING, NULL);
@@ -1204,9 +1209,11 @@
atomic_set(&suspend_client, 1);
+ spin_lock(&log_list_lock);
list_for_each_entry(lc, &log_list_head, log_list) {
atomic_set(&lc->in_sync, 0);
}
+ spin_unlock(&log_list_lock);
if (likely(!shutting_down))
suspend_server();
@@ -1238,6 +1245,7 @@
switch(type){
case SERVICE_NODE_LEAVE:
case SERVICE_NODE_FAILED:
+ spin_lock(&log_list_lock);
list_for_each_entry(lc, &log_list_head, log_list){
for(i=0, server = 0xDEAD; i < count; i++){
if(lc->server_id == nodeids[i]){
@@ -1247,6 +1255,8 @@
/* ATTENTION -- need locking around this ? */
lc->server_id = server;
}
+ spin_unlock(&log_list_lock);
+
break;
case SERVICE_NODE_JOIN:
break;
@@ -1387,6 +1397,7 @@
INIT_LIST_HEAD(&marked_region_list);
spin_lock_init(&region_state_lock);
+ spin_lock_init(&log_list_lock);
region_state_pool = mempool_create(20, region_state_alloc,
region_state_free, NULL);
if(!region_state_pool){
--- cluster/cmirror-kernel/src/Attic/dm-cmirror-server.c 2007/02/21 17:14:44 1.1.2.24
+++ cluster/cmirror-kernel/src/Attic/dm-cmirror-server.c 2007/02/26 17:38:06 1.1.2.25
@@ -47,6 +47,7 @@
static atomic_t _do_requests;
static int debug_disk_write = 0;
+extern spinlock_t log_list_lock;
extern struct list_head log_list_head;
static void *region_user_alloc(int gfp_mask, void *pool_data){
@@ -649,6 +650,7 @@
static struct log_c *get_log_context(char *uuid, int uuid_ref){
struct log_c *lc, *r = NULL;
+ spin_lock(&log_list_lock);
list_for_each_entry(lc, &log_list_head, log_list){
if (!strncmp(lc->uuid, uuid, MAX_NAME_LEN) &&
(uuid_ref == lc->uuid_ref)) {
@@ -658,6 +660,7 @@
r = lc;
}
}
+ spin_unlock(&log_list_lock);
return r;
}
@@ -1079,6 +1082,7 @@
if (atomic_read(&restart_event_type) == SERVICE_NODE_FAILED)
DMINFO("A cluster mirror log member has failed.");
+ spin_lock(&log_list_lock);
list_for_each_entry(lc, &log_list_head, log_list){
if(lc->server_id == my_id){
if (atomic_read(&lc->suspended)) {
@@ -1088,6 +1092,8 @@
}
}
}
+ spin_unlock(&log_list_lock);
+
break;
default:
/* Someone has joined, or there is no event */
More information about the Cluster-devel
mailing list