[Cluster-devel] Cluster Project branch, RHEL5, updated. cmirror_1_1_15-117-g151f914

rpeterso at sourceware.org
Wed Jun 18 19:31:30 UTC 2008


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Cluster Project".

http://sources.redhat.com/git/gitweb.cgi?p=cluster.git;a=commitdiff;h=151f91487730addf1edd1bcf47e4ee5a4cb2ae78

The branch, RHEL5, has been updated
       via  151f91487730addf1edd1bcf47e4ee5a4cb2ae78 (commit)
       via  343655ae1fccc393bd92128ec90058f7de722e77 (commit)
      from  ce02ccaa6fdbb1192c5e9714c06347f7f37bf0de (commit)

The revisions listed above that are new to this repository have not
appeared in any other notification email, so they are listed in full
below.

- Log -----------------------------------------------------------------
commit 151f91487730addf1edd1bcf47e4ee5a4cb2ae78
Author: Bob Peterson <rpeterso at redhat.com>
Date:   Wed Jun 18 14:07:26 2008 -0500

    452004: gfs: BUG: unable to handle kernel paging request.
    This is a gfs cross-write from gfs2, to be included with
    446085 in RHEL5.

commit 343655ae1fccc393bd92128ec90058f7de722e77
Author: Bob Peterson <rpeterso at redhat.com>
Date:   Tue May 20 09:37:53 2008 -0500

    bz 446085: Back-port faster bitfit algorithm from gfs2 for better
    performance.

-----------------------------------------------------------------------
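
The speed-up in bz 446085 comes from scanning the allocation bitmap one
unsigned long at a time instead of one byte at a time. GFS packs four
blocks per byte (GFS_NBBY) at two bits each (GFS_BIT_SIZE), and the
patch keys its skip test on the low bit of each two-bit pair: a word
whose masked low bits all read as the "skip" pattern for the wanted
state cannot contain a match and is rejected with a single compare.
Below is a minimal standalone sketch of that test, using the same
constants the patch defines as LBITMASK/LBITSKIP55/LBITSKIP00; the
helper word_may_match() is invented for illustration and is not in the
patch.

#include <stdio.h>

/* 0x55...55 for whatever width unsigned long has; selects the low bit
 * of every two-bit block-state field, as LBITMASK does in the patch. */
#define LBITMASK   (~0UL / 3)
#define LBITSKIP55 (~0UL / 3)
#define LBITSKIP00 (0UL)

/* Invented helper, not in the patch: nonzero when a word of packed
 * two-bit states could hold a block in the wanted state, i.e. when it
 * is not pure "skip" material for that search. */
static int word_may_match(unsigned long word, int want_used)
{
	unsigned long skipval = want_used ? LBITSKIP00 : LBITSKIP55;
	return (word & LBITMASK) != skipval;
}

int main(void)
{
	unsigned long all_used = LBITSKIP55;		/* every low bit set */
	unsigned long one_free = all_used & ~(1UL << 6); /* free one block */

	printf("all_used may hold a free block? %d\n",
	       word_may_match(all_used, 0));	/* 0: skip the whole word */
	printf("one_free may hold a free block? %d\n",
	       word_may_match(one_free, 0));	/* 1: byte scan needed */
	return 0;
}

This is why lskipval in the patch is LBITSKIP00 when searching for a
used block and LBITSKIP55 when searching for a free one: the skip value
is the pattern a word shows when it holds none of the wanted state.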

Summary of changes:
 gfs-kernel/src/gfs/bits.c |   85 +++++++++++++++++++++++++++++++-------------
 gfs-kernel/src/gfs/bits.h |    3 +-
 gfs-kernel/src/gfs/rgrp.c |    3 +-
 3 files changed, 62 insertions(+), 29 deletions(-)

diff --git a/gfs-kernel/src/gfs/bits.c b/gfs-kernel/src/gfs/bits.c
index da08a23..7702ea1 100644
--- a/gfs-kernel/src/gfs/bits.c
+++ b/gfs-kernel/src/gfs/bits.c
@@ -31,6 +31,16 @@
 #include "gfs.h"
 #include "bits.h"
 
+#if BITS_PER_LONG == 32
+#define LBITMASK   (0x55555555UL)
+#define LBITSKIP55 (0x55555555UL)
+#define LBITSKIP00 (0x00000000UL)
+#else
+#define LBITMASK   (0x5555555555555555UL)
+#define LBITSKIP55 (0x5555555555555555UL)
+#define LBITSKIP00 (0x0000000000000000UL)
+#endif
+
 static const char valid_change[16] = {
 	        /* current */
 	/* n */ 0, 1, 1, 1,
@@ -115,41 +125,66 @@ gfs_testbit(struct gfs_rgrpd *rgd,
  */
 
 uint32_t
-gfs_bitfit(struct gfs_rgrpd *rgd,
-	   unsigned char *buffer, unsigned int buflen,
+gfs_bitfit(unsigned char *buffer, unsigned int buflen,
 	   uint32_t goal, unsigned char old_state)
 {
-	unsigned char *byte, *end, alloc;
-	uint32_t blk = goal;
-	unsigned int bit;
-
-	byte = buffer + (goal / GFS_NBBY);
-	bit = (goal % GFS_NBBY) * GFS_BIT_SIZE;
-	end = buffer + buflen;
-	alloc = (old_state & 1) ? 0 : 0x55;
-
+	const u8 *byte, *start, *end;
+	int bit, startbit;
+	u32 g1, g2, misaligned;
+	unsigned long *plong;
+	unsigned long lskipval;
+
+	lskipval = (old_state & GFS_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
+	g1 = (goal / GFS_NBBY);
+	start = buffer + g1;
+	byte = start;
+	end = buffer + buflen;
+	g2 = ALIGN(g1, sizeof(unsigned long));
+	plong = (unsigned long *)(buffer + g2);
+	startbit = bit = (goal % GFS_NBBY) * GFS_BIT_SIZE;
+	misaligned = g2 - g1;
+	if (!misaligned)
+		goto ulong_aligned;
+/* parse the bitmap a byte at a time */
+misaligned:
 	while (byte < end) {
-		if ((*byte & 0x55) == alloc) {
-			blk += (8 - bit) >> 1;
-
-			bit = 0;
-			byte++;
-
-			continue;
+		if (((*byte >> bit) & GFS_BIT_MASK) == old_state) {
+			return goal +
+				(((byte - start) * GFS_NBBY) +
+				 ((bit - startbit) >> 1));
 		}
-
-		if (((*byte >> bit) & GFS_BIT_MASK) == old_state)
-			return blk;
-
 		bit += GFS_BIT_SIZE;
-		if (bit >= 8) {
+		if (bit >= GFS_NBBY * GFS_BIT_SIZE) {
 			bit = 0;
 			byte++;
+			misaligned--;
+			if (!misaligned) {
+				plong = (unsigned long *)byte;
+				goto ulong_aligned;
+			}
 		}
-
-		blk++;
 	}
+	return BFITNOENT;
 
+/* parse the bitmap an unsigned long at a time */
+ulong_aligned:
+	/* Stop at "end - 1" or else prefetch can go past the end and segfault.
+	   We could "if" it but we'd lose some of the performance gained.
+	   This way will only slow down searching the very last 4/8 bytes
+	   depending on architecture.  I've experimented with several ways
+	   of writing this section such as using an else before the goto
+	   but this one seems to be the fastest. */
+	while ((unsigned char *)plong < end - sizeof(unsigned long)) {
+		prefetch(plong + 1);
+		if (((*plong) & LBITMASK) != lskipval)
+			break;
+		plong++;
+	}
+	if ((unsigned char *)plong < end) {
+		byte = (const u8 *)plong;
+		misaligned += sizeof(unsigned long) - 1;
+		goto misaligned;
+	}
 	return BFITNOENT;
 }
 
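The long comment in the hunk above explains the loop bound: the word
loop stops at "end - sizeof(unsigned long)" so that prefetch(plong + 1)
can never touch memory past the bitmap, at the price of scanning the
final word byte-wise. Here is a rough userspace picture of that bound,
together with the ALIGN() step that walks the unaligned bytes in front
of the first long boundary; MY_ALIGN and scan_words are invented names,
and the buffer is kept long-aligned with a union, as a buffer head's
data would be in the kernel.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Round x up to the next multiple of a (a power of two), like the
 * kernel ALIGN() macro the patch applies to the goal byte offset. */
#define MY_ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Skip words that cannot hold a match, stopping one word short of the
 * end so that looking one word ahead (the patch's prefetch) stays in
 * bounds; the leftover tail goes back to the byte-at-a-time scan. */
static const unsigned char *scan_words(const unsigned char *start,
				       const unsigned char *end,
				       unsigned long mask,
				       unsigned long skipval)
{
	const unsigned long *p = (const unsigned long *)start;

	while ((const unsigned char *)p < end - sizeof(unsigned long)) {
		/* prefetch(p + 1); -- safe because of the loop bound */
		if ((*p & mask) != skipval)
			break;
		p++;
	}
	return (const unsigned char *)p;
}

int main(void)
{
	/* The union keeps the byte buffer long-aligned for the cast. */
	union { unsigned long pad[8]; unsigned char b[64]; } u;
	size_t goal_byte = 5;
	size_t aligned = MY_ALIGN(goal_byte, sizeof(unsigned long));

	memset(u.b, 0x55, sizeof(u.b));	/* every block marked in-use */

	printf("byte-scan offsets %zu..%zu, word-scan from %zu\n",
	       goal_byte, aligned - 1, aligned);

	const unsigned char *tail = scan_words(u.b + aligned,
					       u.b + sizeof(u.b),
					       ~0UL / 3, ~0UL / 3);
	printf("word scan handed back the tail at offset %td\n",
	       tail - u.b);	/* sizeof(u.b) - sizeof(unsigned long) */
	return 0;
}
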
diff --git a/gfs-kernel/src/gfs/bits.h b/gfs-kernel/src/gfs/bits.h
index ed38102..9990bae 100644
--- a/gfs-kernel/src/gfs/bits.h
+++ b/gfs-kernel/src/gfs/bits.h
@@ -22,8 +22,7 @@ void gfs_setbit(struct gfs_rgrpd *rgd,
 unsigned char gfs_testbit(struct gfs_rgrpd *rgd,
 			  unsigned char *buffer, unsigned int buflen,
 			  uint32_t block);
-uint32_t gfs_bitfit(struct gfs_rgrpd *rgd,
-		    unsigned char *buffer, unsigned int buflen,
+uint32_t gfs_bitfit(unsigned char *buffer, unsigned int buflen,
 		    uint32_t goal, unsigned char old_state);
 uint32_t gfs_bitcount(struct gfs_rgrpd *rgd,
 		      unsigned char *buffer, unsigned int buflen,
diff --git a/gfs-kernel/src/gfs/rgrp.c b/gfs-kernel/src/gfs/rgrp.c
index c8667a0..90c00d0 100644
--- a/gfs-kernel/src/gfs/rgrp.c
+++ b/gfs-kernel/src/gfs/rgrp.c
@@ -1443,8 +1443,7 @@ blkalloc_internal(struct gfs_rgrpd *rgd,
 	   allocatable block anywhere else, we want to be able to wrap around and
 	   search in the first part of our first-searched bit block.  */
 	for (x = 0; x <= length; x++) {
-		blk = gfs_bitfit(rgd,
-				 rgd->rd_bh[buf]->b_data + bits->bi_offset,
+		blk = gfs_bitfit(rgd->rd_bh[buf]->b_data + bits->bi_offset,
 				 bits->bi_len, goal, old_state);
 		if (blk != BFITNOENT)
 			break;

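For reference, stripped of the fast path, the search computes the first
block at or after goal whose two-bit state equals old_state, returning
BFITNOENT when the buffer holds none. The byte-at-a-time sketch below
is an invented reference version for userspace testing, not the patched
code; GFS_NBBY = 4 and GFS_BIT_SIZE = 2 match their usage in the diff,
while GFS_BIT_MASK = 0x3 and the all-ones BFITNOENT are assumptions.

#include <stdint.h>
#include <stdio.h>

#define GFS_NBBY     4			/* blocks per byte */
#define GFS_BIT_SIZE 2			/* bits per block state */
#define GFS_BIT_MASK 0x3		/* assumed value */
#define BFITNOENT    ((uint32_t)~0)	/* assumed sentinel */

/* Invented reference version of the search: first block >= goal whose
 * two-bit state equals old_state, else BFITNOENT. Equivalent to what
 * gfs_bitfit returns, minus the word-skip fast path. */
static uint32_t bitfit_ref(const unsigned char *buffer, unsigned int buflen,
			   uint32_t goal, unsigned char old_state)
{
	uint32_t blk;

	for (blk = goal; blk < buflen * GFS_NBBY; blk++) {
		unsigned int bit = (blk % GFS_NBBY) * GFS_BIT_SIZE;
		if (((buffer[blk / GFS_NBBY] >> bit) & GFS_BIT_MASK)
		    == old_state)
			return blk;
	}
	return BFITNOENT;
}

int main(void)
{
	unsigned char bitmap[8];	/* 8 bytes = 32 blocks */
	unsigned int i;

	for (i = 0; i < sizeof(bitmap); i++)
		bitmap[i] = 0x55;	/* every block in-use (state 1) */
	/* free block 21: clear its two-bit field (byte 5, bits 2-3) */
	bitmap[21 / GFS_NBBY] &=
		~(GFS_BIT_MASK << ((21 % GFS_NBBY) * GFS_BIT_SIZE));

	printf("first free block from 0:  %u\n",
	       (unsigned)bitfit_ref(bitmap, sizeof(bitmap), 0, 0)); /* 21 */
	printf("first free block from 22: %s\n",
	       bitfit_ref(bitmap, sizeof(bitmap), 22, 0) == BFITNOENT
	       ? "BFITNOENT" : "found");
	return 0;
}

The rgrp.c hunk above shows the call site adapting to the new
prototype; the rgd argument was dead weight inside the search itself,
so bits.h and that caller are the visible API impact of the change.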

hooks/post-receive
--
Cluster Project