rpms/kernel/devel kernel.spec, 1.359, 1.360 linux-2.6-ext4-jbd2-patch-queue.patch, 1.1, 1.2

Eric Sandeen (sandeen) fedora-extras-commits at redhat.com
Mon Jan 14 16:29:53 UTC 2008


Author: sandeen

Update of /cvs/pkgs/rpms/kernel/devel
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv18385

Modified Files:
	kernel.spec linux-2.6-ext4-jbd2-patch-queue.patch 
Log Message:
* Mon Jan 14 2008 Eric Sandeen <sandeen at redhat.com>
- Update ext4 patch to latest stable patch queue



Index: kernel.spec
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/kernel.spec,v
retrieving revision 1.359
retrieving revision 1.360
diff -u -r1.359 -r1.360
--- kernel.spec	12 Jan 2008 15:59:24 -0000	1.359
+++ kernel.spec	14 Jan 2008 16:29:01 -0000	1.360
@@ -1756,6 +1756,9 @@
 %kernel_variant_files -a /%{image_install_path}/xen*-%{KVERREL} -e /etc/ld.so.conf.d/kernelcap-%{KVERREL}.conf %{with_xen} xen
 
 %changelog
+* Mon Jan 14 2008 Eric Sandeen <sandeen at redhat.com>
+- Update ext4 patch to latest stable patch queue
+
 * Sat Jan 12 2008 Kyle McMartin <kmcmartin at redhat.com>
 - 2.6.24-rc7-git4
 

linux-2.6-ext4-jbd2-patch-queue.patch:

Index: linux-2.6-ext4-jbd2-patch-queue.patch
===================================================================
RCS file: /cvs/pkgs/rpms/kernel/devel/linux-2.6-ext4-jbd2-patch-queue.patch,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- linux-2.6-ext4-jbd2-patch-queue.patch	10 Jan 2008 18:38:46 -0000	1.1
+++ linux-2.6-ext4-jbd2-patch-queue.patch	14 Jan 2008 16:29:01 -0000	1.2
@@ -444,8 +444,36 @@
  	int has_super = 0;
  
  	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
-@@ -1586,6 +1766,9 @@ static ext4_fsblk_t descriptor_loc(struc
+@@ -1584,8 +1764,37 @@ static ext4_fsblk_t descriptor_loc(struc
+ 	return (has_super + ext4_group_first_block_no(sb, bg));
+ }
  
++/**
++ * ext4_get_stripe_size: Get the stripe size.
++ * @sbi: In memory super block info
++ *
++ * If we have specified it via mount option, then
++ * use the mount option value. If the value specified at mount time is
++ * greater than the blocks per group use the super block value.
++ * If the super block value is greater than blocks per group return 0.
++ * Allocator needs it be less than blocks per group.
++ *
++ */
++static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
++{
++	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
++	unsigned long stripe_width = le32_to_cpu(sbi->s_es->s_raid_stripe_width);
++
++	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) {
++		return sbi->s_stripe;
++	} else if (stripe_width <= sbi->s_blocks_per_group) {
++		return stripe_width;
++	} else if (stride <= sbi->s_blocks_per_group) {
++		return stride;
++	}
++
++	return 0;
++}
  
  static int ext4_fill_super (struct super_block *sb, void *data, int silent)
 +				__releases(kernel_sem)
@@ -454,7 +482,15 @@
  {
  	struct buffer_head * bh;
  	struct ext4_super_block *es = NULL;
-@@ -1624,6 +1807,11 @@ static int ext4_fill_super (struct super
+@@ -1599,7 +1808,6 @@ static int ext4_fill_super (struct super
+ 	unsigned long def_mount_opts;
+ 	struct inode *root;
+ 	int blocksize;
+-	int hblock;
+ 	int db_count;
+ 	int i;
+ 	int needs_recovery;
+@@ -1624,6 +1832,11 @@ static int ext4_fill_super (struct super
  		goto out_fail;
  	}
  
@@ -466,7 +502,7 @@
  	/*
  	 * The ext4 superblock will not be buffer aligned for other than 1kB
  	 * block sizes.  We need to calculate the offset from buffer start.
-@@ -1674,10 +1862,10 @@ static int ext4_fill_super (struct super
+@@ -1674,10 +1887,10 @@ static int ext4_fill_super (struct super
  
  	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
  		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
@@ -480,7 +516,7 @@
  
  	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
  	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
-@@ -1689,6 +1877,11 @@ static int ext4_fill_super (struct super
+@@ -1689,6 +1902,11 @@ static int ext4_fill_super (struct super
  	 * User -o noextents to turn it off
  	 */
  	set_opt(sbi->s_mount_opt, EXTENTS);
@@ -492,7 +528,7 @@
  
  	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
  			    NULL, 0))
-@@ -1723,6 +1916,19 @@ static int ext4_fill_super (struct super
+@@ -1723,6 +1941,19 @@ static int ext4_fill_super (struct super
  		       sb->s_id, le32_to_cpu(features));
  		goto failed_mount;
  	}
@@ -512,7 +548,33 @@
  	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
  
  	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
-@@ -1764,6 +1970,7 @@ static int ext4_fill_super (struct super
+@@ -1733,20 +1964,16 @@ static int ext4_fill_super (struct super
+ 		goto failed_mount;
+ 	}
+ 
+-	hblock = bdev_hardsect_size(sb->s_bdev);
+ 	if (sb->s_blocksize != blocksize) {
+-		/*
+-		 * Make sure the blocksize for the filesystem is larger
+-		 * than the hardware sectorsize for the machine.
+-		 */
+-		if (blocksize < hblock) {
+-			printk(KERN_ERR "EXT4-fs: blocksize %d too small for "
+-			       "device blocksize %d.\n", blocksize, hblock);
++
++		/* Validate the filesystem blocksize */
++		if (!sb_set_blocksize(sb, blocksize)) {
++			printk(KERN_ERR "EXT4-fs: bad block size %d.\n",
++					blocksize);
+ 			goto failed_mount;
+ 		}
+ 
+ 		brelse (bh);
+-		sb_set_blocksize(sb, blocksize);
+ 		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
+ 		offset = do_div(logical_sb_block, blocksize);
+ 		bh = sb_bread(sb, logical_sb_block);
+@@ -1764,6 +1991,7 @@ static int ext4_fill_super (struct super
  		}
  	}
  
@@ -520,7 +582,7 @@
  	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits);
  
  	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
-@@ -1838,6 +2045,17 @@ static int ext4_fill_super (struct super
+@@ -1838,6 +2066,17 @@ static int ext4_fill_super (struct super
  
  	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
  		goto cantfind_ext4;
@@ -538,7 +600,16 @@
  	blocks_count = (ext4_blocks_count(es) -
  			le32_to_cpu(es->s_first_data_block) +
  			EXT4_BLOCKS_PER_GROUP(sb) - 1);
-@@ -1944,6 +2162,21 @@ static int ext4_fill_super (struct super
+@@ -1900,6 +2139,8 @@ static int ext4_fill_super (struct super
+ 	sbi->s_rsv_window_head.rsv_goal_size = 0;
+ 	ext4_rsv_window_add(sb, &sbi->s_rsv_window_head);
+ 
++	sbi->s_stripe = ext4_get_stripe_size(sbi);
++
+ 	/*
+ 	 * set up enough so that it can read an inode
+ 	 */
+@@ -1944,6 +2185,21 @@ static int ext4_fill_super (struct super
  		goto failed_mount4;
  	}
  
@@ -560,7 +631,7 @@
  	/* We have now updated the journal if required, so we can
  	 * validate the data journaling mode. */
  	switch (test_opt(sb, DATA_FLAGS)) {
-@@ -2044,6 +2277,7 @@ static int ext4_fill_super (struct super
+@@ -2044,6 +2300,7 @@ static int ext4_fill_super (struct super
  		"writeback");
  
  	ext4_ext_init(sb);
@@ -568,7 +639,7 @@
  
  	lock_kernel();
  	return 0;
-@@ -2673,7 +2907,7 @@ static int ext4_statfs (struct dentry * 
+@@ -2673,7 +2930,7 @@ static int ext4_statfs (struct dentry * 
  	if (test_opt(sb, MINIX_DF)) {
  		sbi->s_overhead_last = 0;
  	} else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
@@ -577,7 +648,7 @@
  		ext4_fsblk_t overhead = 0;
  		smp_rmb();
  
-@@ -2909,7 +3143,7 @@ static ssize_t ext4_quota_read(struct su
+@@ -2909,7 +3166,7 @@ static ssize_t ext4_quota_read(struct su
  			       size_t len, loff_t off)
  {
  	struct inode *inode = sb_dqopt(sb)->files[type];
@@ -586,7 +657,7 @@
  	int err = 0;
  	int offset = off & (sb->s_blocksize - 1);
  	int tocopy;
-@@ -2947,7 +3181,7 @@ static ssize_t ext4_quota_write(struct s
+@@ -2947,7 +3204,7 @@ static ssize_t ext4_quota_write(struct s
  				const char *data, size_t len, loff_t off)
  {
  	struct inode *inode = sb_dqopt(sb)->files[type];
@@ -595,7 +666,7 @@
  	int err = 0;
  	int offset = off & (sb->s_blocksize - 1);
  	int tocopy;
-@@ -3002,7 +3236,6 @@ out:
+@@ -3002,7 +3259,6 @@ out:
  		i_size_write(inode, off+len-towrite);
  		EXT4_I(inode)->i_disksize = inode->i_size;
  	}
@@ -603,7 +674,7 @@
  	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
  	ext4_mark_inode_dirty(handle, inode);
  	mutex_unlock(&inode->i_mutex);
-@@ -3027,9 +3260,15 @@ static struct file_system_type ext4dev_f
+@@ -3027,9 +3283,15 @@ static struct file_system_type ext4dev_f
  
  static int __init init_ext4_fs(void)
  {
@@ -620,7 +691,7 @@
  	err = init_inodecache();
  	if (err)
  		goto out1;
-@@ -3041,6 +3280,8 @@ out:
+@@ -3041,6 +3303,8 @@ out:
  	destroy_inodecache();
  out1:
  	exit_ext4_xattr();
@@ -629,7 +700,7 @@
  	return err;
  }
  
-@@ -3049,6 +3290,7 @@ static void __exit exit_ext4_fs(void)
+@@ -3049,6 +3313,7 @@ static void __exit exit_ext4_fs(void)
  	unregister_filesystem(&ext4dev_fs_type);
  	destroy_inodecache();
  	exit_ext4_xattr();
@@ -1497,15 +1568,7 @@
 ===================================================================
 --- linux-2.6.23.noarch.orig/fs/ext4/extents.c
 +++ linux-2.6.23.noarch/fs/ext4/extents.c
-@@ -27,6 +27,7 @@
-  *   - ext4*_error() should be used in some situations
-  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
-  *   - smart tree reduction
-+ *    stable boundary change
-  */
- 
- #include <linux/module.h>
-@@ -144,7 +145,7 @@ static int ext4_ext_dirty(handle_t *hand
+@@ -144,7 +144,7 @@ static int ext4_ext_dirty(handle_t *hand
  
  static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
  			      struct ext4_ext_path *path,
@@ -1514,7 +1577,7 @@
  {
  	struct ext4_inode_info *ei = EXT4_I(inode);
  	ext4_fsblk_t bg_start;
-@@ -367,13 +368,14 @@ static void ext4_ext_drop_refs(struct ex
+@@ -367,13 +367,14 @@ static void ext4_ext_drop_refs(struct ex
   * the header must be checked before calling this
   */
  static void
@@ -1531,7 +1594,7 @@
  
  	l = EXT_FIRST_INDEX(eh) + 1;
  	r = EXT_LAST_INDEX(eh);
-@@ -425,7 +427,8 @@ ext4_ext_binsearch_idx(struct inode *ino
+@@ -425,7 +426,8 @@ ext4_ext_binsearch_idx(struct inode *ino
   * the header must be checked before calling this
   */
  static void
@@ -1541,7 +1604,7 @@
  {
  	struct ext4_extent_header *eh = path->p_hdr;
  	struct ext4_extent *r, *l, *m;
-@@ -438,7 +441,7 @@ ext4_ext_binsearch(struct inode *inode, 
+@@ -438,7 +440,7 @@ ext4_ext_binsearch(struct inode *inode, 
  		return;
  	}
  
@@ -1550,7 +1613,7 @@
  
  	l = EXT_FIRST_EXTENT(eh) + 1;
  	r = EXT_LAST_EXTENT(eh);
-@@ -494,7 +497,8 @@ int ext4_ext_tree_init(handle_t *handle,
+@@ -494,7 +496,8 @@ int ext4_ext_tree_init(handle_t *handle,
  }
  
  struct ext4_ext_path *
@@ -1560,7 +1623,7 @@
  {
  	struct ext4_extent_header *eh;
  	struct buffer_head *bh;
-@@ -763,7 +767,7 @@ static int ext4_ext_split(handle_t *hand
+@@ -763,7 +766,7 @@ static int ext4_ext_split(handle_t *hand
  	while (k--) {
  		oldblock = newblock;
  		newblock = ablocks[--a];
@@ -1569,7 +1632,7 @@
  		if (!bh) {
  			err = -EIO;
  			goto cleanup;
-@@ -783,9 +787,8 @@ static int ext4_ext_split(handle_t *hand
+@@ -783,9 +786,8 @@ static int ext4_ext_split(handle_t *hand
  		fidx->ei_block = border;
  		ext4_idx_store_pblock(fidx, oldblock);
  
@@ -1581,7 +1644,7 @@
  		/* copy indexes */
  		m = 0;
  		path[i].p_idx++;
-@@ -851,7 +854,7 @@ cleanup:
+@@ -851,7 +853,7 @@ cleanup:
  		for (i = 0; i < depth; i++) {
  			if (!ablocks[i])
  				continue;
@@ -1590,7 +1653,7 @@
  		}
  	}
  	kfree(ablocks);
-@@ -979,8 +982,8 @@ repeat:
+@@ -979,8 +981,8 @@ repeat:
  		/* refill path */
  		ext4_ext_drop_refs(path);
  		path = ext4_ext_find_extent(inode,
@@ -1601,7 +1664,7 @@
  		if (IS_ERR(path))
  			err = PTR_ERR(path);
  	} else {
-@@ -992,8 +995,8 @@ repeat:
+@@ -992,8 +994,8 @@ repeat:
  		/* refill path */
  		ext4_ext_drop_refs(path);
  		path = ext4_ext_find_extent(inode,
@@ -1612,7 +1675,7 @@
  		if (IS_ERR(path)) {
  			err = PTR_ERR(path);
  			goto out;
-@@ -1015,13 +1018,155 @@ out:
+@@ -1015,13 +1017,157 @@ out:
  }
  
  /*
@@ -1628,7 +1691,7 @@
 +{
 +	struct ext4_extent_idx *ix;
 +	struct ext4_extent *ex;
-+	int depth;
++	int depth, ee_len;
 +
 +	BUG_ON(path == NULL);
 +	depth = path->p_depth;
@@ -1642,6 +1705,7 @@
 +	 * first one in the file */
 +
 +	ex = path[depth].p_ext;
++	ee_len = ext4_ext_get_actual_len(ex);
 +	if (*logical < le32_to_cpu(ex->ee_block)) {
 +		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
 +		while (--depth >= 0) {
@@ -1651,10 +1715,10 @@
 +		return 0;
 +	}
 +
-+	BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
++	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
 +
-+	*logical = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1;
-+	*phys = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - 1;
++	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
++	*phys = ext_pblock(ex) + ee_len - 1;
 +	return 0;
 +}
 +
@@ -1674,7 +1738,7 @@
 +	struct ext4_extent_idx *ix;
 +	struct ext4_extent *ex;
 +	ext4_fsblk_t block;
-+	int depth;
++	int depth, ee_len;
 +
 +	BUG_ON(path == NULL);
 +	depth = path->p_depth;
@@ -1688,6 +1752,7 @@
 +	 * first one in the file */
 +
 +	ex = path[depth].p_ext;
++	ee_len = ext4_ext_get_actual_len(ex);
 +	if (*logical < le32_to_cpu(ex->ee_block)) {
 +		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
 +		while (--depth >= 0) {
@@ -1699,7 +1764,7 @@
 +		return 0;
 +	}
 +
-+	BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));
++	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));
 +
 +	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
 +		/* next allocated block in this leaf */
@@ -1769,7 +1834,7 @@
  ext4_ext_next_allocated_block(struct ext4_ext_path *path)
  {
  	int depth;
-@@ -1054,7 +1199,7 @@ ext4_ext_next_allocated_block(struct ext
+@@ -1054,7 +1200,7 @@ ext4_ext_next_allocated_block(struct ext
   * ext4_ext_next_leaf_block:
   * returns first allocated block from next leaf or EXT_MAX_BLOCK
   */
@@ -1778,7 +1843,7 @@
  					struct ext4_ext_path *path)
  {
  	int depth;
-@@ -1072,7 +1217,7 @@ static unsigned ext4_ext_next_leaf_block
+@@ -1072,7 +1218,7 @@ static unsigned ext4_ext_next_leaf_block
  	while (depth >= 0) {
  		if (path[depth].p_idx !=
  				EXT_LAST_INDEX(path[depth].p_hdr))
@@ -1787,7 +1852,7 @@
  		depth--;
  	}
  
-@@ -1085,7 +1230,7 @@ static unsigned ext4_ext_next_leaf_block
+@@ -1085,7 +1231,7 @@ static unsigned ext4_ext_next_leaf_block
   * then we have to correct all indexes above.
   * TODO: do we need to correct tree in all cases?
   */
@@ -1796,7 +1861,16 @@
  				struct ext4_ext_path *path)
  {
  	struct ext4_extent_header *eh;
-@@ -1239,7 +1384,7 @@ unsigned int ext4_ext_check_overlap(stru
+@@ -1171,7 +1317,7 @@ ext4_can_extents_be_merged(struct inode 
+ 	if (ext1_ee_len + ext2_ee_len > max_len)
+ 		return 0;
+ #ifdef AGGRESSIVE_TEST
+-	if (le16_to_cpu(ex1->ee_len) >= 4)
++	if (ext1_ee_len >= 4)
+ 		return 0;
+ #endif
+ 
+@@ -1239,7 +1385,7 @@ unsigned int ext4_ext_check_overlap(stru
  				    struct ext4_extent *newext,
  				    struct ext4_ext_path *path)
  {
@@ -1805,7 +1879,7 @@
  	unsigned int depth, len1;
  	unsigned int ret = 0;
  
-@@ -1260,7 +1405,7 @@ unsigned int ext4_ext_check_overlap(stru
+@@ -1260,7 +1406,7 @@ unsigned int ext4_ext_check_overlap(stru
  			goto out;
  	}
  
@@ -1814,7 +1888,7 @@
  	if (b1 + len1 < b1) {
  		len1 = EXT_MAX_BLOCK - b1;
  		newext->ee_len = cpu_to_le16(len1);
-@@ -1290,7 +1435,8 @@ int ext4_ext_insert_extent(handle_t *han
+@@ -1290,7 +1436,8 @@ int ext4_ext_insert_extent(handle_t *han
  	struct ext4_extent *ex, *fex;
  	struct ext4_extent *nearex; /* nearest extent */
  	struct ext4_ext_path *npath = NULL;
@@ -1824,7 +1898,7 @@
  	unsigned uninitialized = 0;
  
  	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
-@@ -1435,114 +1581,8 @@ cleanup:
+@@ -1435,114 +1582,8 @@ cleanup:
  	return err;
  }
  
@@ -1940,7 +2014,7 @@
  			__u32 len, ext4_fsblk_t start, int type)
  {
  	struct ext4_ext_cache *cex;
-@@ -1561,10 +1601,11 @@ ext4_ext_put_in_cache(struct inode *inod
+@@ -1561,10 +1602,11 @@ ext4_ext_put_in_cache(struct inode *inod
   */
  static void
  ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
@@ -1954,7 +2028,7 @@
  	struct ext4_extent *ex;
  
  	ex = path[depth].p_ext;
-@@ -1576,32 +1617,34 @@ ext4_ext_put_gap_in_cache(struct inode *
+@@ -1576,32 +1618,34 @@ ext4_ext_put_gap_in_cache(struct inode *
  	} else if (block < le32_to_cpu(ex->ee_block)) {
  		lblock = block;
  		len = le32_to_cpu(ex->ee_block) - block;
@@ -2002,7 +2076,7 @@
  			struct ext4_extent *ex)
  {
  	struct ext4_ext_cache *cex;
-@@ -1618,11 +1661,9 @@ ext4_ext_in_cache(struct inode *inode, u
+@@ -1618,11 +1662,9 @@ ext4_ext_in_cache(struct inode *inode, u
  		ex->ee_block = cpu_to_le32(cex->ec_block);
  		ext4_ext_store_pblock(ex, cex->ec_start);
  		ex->ee_len = cpu_to_le16(cex->ec_len);
@@ -2017,7 +2091,7 @@
  		return cex->ec_type;
  	}
  
-@@ -1636,7 +1677,7 @@ ext4_ext_in_cache(struct inode *inode, u
+@@ -1636,7 +1678,7 @@ ext4_ext_in_cache(struct inode *inode, u
   * It's used in truncate case only, thus all requests are for
   * last index in the block only.
   */
@@ -2026,7 +2100,7 @@
  			struct ext4_ext_path *path)
  {
  	struct buffer_head *bh;
-@@ -1657,7 +1698,7 @@ int ext4_ext_rm_idx(handle_t *handle, st
+@@ -1657,7 +1699,7 @@ int ext4_ext_rm_idx(handle_t *handle, st
  	ext_debug("index is empty, remove it, free block %llu\n", leaf);
  	bh = sb_find_get_block(inode->i_sb, leaf);
  	ext4_forget(handle, 1, inode, bh, leaf);
@@ -2035,7 +2109,7 @@
  	return err;
  }
  
-@@ -1666,7 +1707,7 @@ int ext4_ext_rm_idx(handle_t *handle, st
+@@ -1666,7 +1708,7 @@ int ext4_ext_rm_idx(handle_t *handle, st
   * This routine returns max. credits that the extent tree can consume.
   * It should be OK for low-performance paths like ->writepage()
   * To allow many writing processes to fit into a single transaction,
@@ -2044,7 +2118,7 @@
   * pass the actual path.
   */
  int ext4_ext_calc_credits_for_insert(struct inode *inode,
-@@ -1714,12 +1755,14 @@ int ext4_ext_calc_credits_for_insert(str
+@@ -1714,12 +1756,14 @@ int ext4_ext_calc_credits_for_insert(str
  
  static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
  				struct ext4_extent *ex,
@@ -2061,7 +2135,7 @@
  #ifdef EXTENTS_STATS
  	{
  		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-@@ -1738,22 +1781,23 @@ static int ext4_remove_blocks(handle_t *
+@@ -1738,22 +1782,23 @@ static int ext4_remove_blocks(handle_t *
  	if (from >= le32_to_cpu(ex->ee_block)
  	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
  		/* tail removal */
@@ -2090,7 +2164,7 @@
  			from, to, le32_to_cpu(ex->ee_block), ee_len);
  	}
  	return 0;
-@@ -1761,19 +1805,20 @@ static int ext4_remove_blocks(handle_t *
+@@ -1761,19 +1806,20 @@ static int ext4_remove_blocks(handle_t *
  
  static int
  ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
@@ -2115,7 +2189,7 @@
  	if (!path[depth].p_hdr)
  		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
  	eh = path[depth].p_hdr;
-@@ -1904,7 +1949,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path
+@@ -1904,7 +1950,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path
  	return 1;
  }
  
@@ -2124,7 +2198,7 @@
  {
  	struct super_block *sb = inode->i_sb;
  	int depth = ext_depth(inode);
-@@ -1912,7 +1957,7 @@ int ext4_ext_remove_space(struct inode *
+@@ -1912,7 +1958,7 @@ int ext4_ext_remove_space(struct inode *
  	handle_t *handle;
  	int i = 0, err = 0;
  
@@ -2133,7 +2207,7 @@
  
  	/* probably first extent we're gonna free will be last in block */
  	handle = ext4_journal_start(inode, depth + 1);
-@@ -2094,9 +2139,9 @@ void ext4_ext_release(struct super_block
+@@ -2094,9 +2140,9 @@ void ext4_ext_release(struct super_block
   *   b> Splits in two extents: Write is happening at either end of the extent
   *   c> Splits in three extents: Somone is writing in middle of the extent
   */
@@ -2145,7 +2219,7 @@
  					unsigned long max_blocks)
  {
  	struct ext4_extent *ex, newex;
-@@ -2104,7 +2149,8 @@ int ext4_ext_convert_to_initialized(hand
+@@ -2104,7 +2150,8 @@ int ext4_ext_convert_to_initialized(hand
  	struct ext4_extent *ex2 = NULL;
  	struct ext4_extent *ex3 = NULL;
  	struct ext4_extent_header *eh;
@@ -2155,7 +2229,7 @@
  	ext4_fsblk_t newblock;
  	int err = 0;
  	int ret = 0;
-@@ -2225,8 +2271,13 @@ out:
+@@ -2225,8 +2272,13 @@ out:
  	return err ? err : allocated;
  }
  
@@ -2170,7 +2244,7 @@
  			unsigned long max_blocks, struct buffer_head *bh_result,
  			int create, int extend_disksize)
  {
-@@ -2236,11 +2287,11 @@ int ext4_ext_get_blocks(handle_t *handle
+@@ -2236,11 +2288,11 @@ int ext4_ext_get_blocks(handle_t *handle
  	ext4_fsblk_t goal, newblock;
  	int err = 0, depth, ret;
  	unsigned long allocated = 0;
@@ -2185,7 +2259,16 @@
  
  	/* check in cache */
  	goal = ext4_ext_in_cache(inode, iblock, &newex);
-@@ -2288,7 +2339,7 @@ int ext4_ext_get_blocks(handle_t *handle
+@@ -2260,7 +2312,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ 				   - le32_to_cpu(newex.ee_block)
+ 				   + ext_pblock(&newex);
+ 			/* number of remaining blocks in the extent */
+-			allocated = le16_to_cpu(newex.ee_len) -
++			allocated = ext4_ext_get_actual_len(&newex) -
+ 					(iblock - le32_to_cpu(newex.ee_block));
+ 			goto out;
+ 		} else {
+@@ -2288,7 +2340,7 @@ int ext4_ext_get_blocks(handle_t *handle
  
  	ex = path[depth].p_ext;
  	if (ex) {
@@ -2194,7 +2277,7 @@
  		ext4_fsblk_t ee_start = ext_pblock(ex);
  		unsigned short ee_len;
  
-@@ -2302,7 +2353,7 @@ int ext4_ext_get_blocks(handle_t *handle
+@@ -2302,7 +2354,7 @@ int ext4_ext_get_blocks(handle_t *handle
  			newblock = iblock - ee_block + ee_start;
  			/* number of remaining blocks in the extent */
  			allocated = ee_len - (iblock - ee_block);
@@ -2203,7 +2286,20 @@
  					ee_block, ee_len, newblock);
  
  			/* Do not put uninitialized extent in the cache */
-@@ -2347,8 +2398,15 @@ int ext4_ext_get_blocks(handle_t *handle
+@@ -2320,9 +2372,10 @@ int ext4_ext_get_blocks(handle_t *handle
+ 			ret = ext4_ext_convert_to_initialized(handle, inode,
+ 								path, iblock,
+ 								max_blocks);
+-			if (ret <= 0)
++			if (ret <= 0) {
++				err = ret;
+ 				goto out2;
+-			else
++			} else
+ 				allocated = ret;
+ 			goto outnew;
+ 		}
+@@ -2347,8 +2400,15 @@ int ext4_ext_get_blocks(handle_t *handle
  	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
  		ext4_init_block_alloc_info(inode);
  
@@ -2221,8 +2317,12 @@
  
  	/*
  	 * See if request is beyond maximum number of blocks we can have in
-@@ -2371,7 +2429,18 @@ int ext4_ext_get_blocks(handle_t *handle
- 		allocated = le16_to_cpu(newex.ee_len);
+@@ -2368,10 +2428,21 @@ int ext4_ext_get_blocks(handle_t *handle
+ 	newex.ee_len = cpu_to_le16(max_blocks);
+ 	err = ext4_ext_check_overlap(inode, &newex, path);
+ 	if (err)
+-		allocated = le16_to_cpu(newex.ee_len);
++		allocated = ext4_ext_get_actual_len(&newex);
  	else
  		allocated = max_blocks;
 -	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
@@ -2241,7 +2341,7 @@
  	if (!newblock)
  		goto out2;
  	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
-@@ -2379,14 +2448,17 @@ int ext4_ext_get_blocks(handle_t *handle
+@@ -2379,14 +2450,17 @@ int ext4_ext_get_blocks(handle_t *handle
  
  	/* try to insert new extent into found leaf and return */
  	ext4_ext_store_pblock(&newex, newblock);
@@ -2257,19 +2357,19 @@
 +		ext4_mb_discard_inode_preallocations(inode);
  		ext4_free_blocks(handle, inode, ext_pblock(&newex),
 -					le16_to_cpu(newex.ee_len));
-+					le16_to_cpu(newex.ee_len), 0);
++					ext4_ext_get_actual_len(&newex), 0);
  		goto out2;
  	}
  
-@@ -2395,6 +2467,7 @@ int ext4_ext_get_blocks(handle_t *handle
+@@ -2395,6 +2469,7 @@ int ext4_ext_get_blocks(handle_t *handle
  
  	/* previous routine could use block we allocated */
  	newblock = ext_pblock(&newex);
-+	allocated = le16_to_cpu(newex.ee_len);
++	allocated = ext4_ext_get_actual_len(&newex);
  outnew:
  	__set_bit(BH_New, &bh_result->b_state);
  
-@@ -2414,8 +2487,6 @@ out2:
+@@ -2414,8 +2489,6 @@ out2:
  		ext4_ext_drop_refs(path);
  		kfree(path);
  	}
@@ -2278,7 +2378,7 @@
  	return err ? err : allocated;
  }
  
-@@ -2423,7 +2494,7 @@ void ext4_ext_truncate(struct inode * in
+@@ -2423,7 +2496,7 @@ void ext4_ext_truncate(struct inode * in
  {
  	struct address_space *mapping = inode->i_mapping;
  	struct super_block *sb = inode->i_sb;
@@ -2287,7 +2387,7 @@
  	handle_t *handle;
  	int err = 0;
  
-@@ -2445,9 +2516,11 @@ void ext4_ext_truncate(struct inode * in
+@@ -2445,9 +2518,11 @@ void ext4_ext_truncate(struct inode * in
  	if (page)
  		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
  
@@ -2300,7 +2400,7 @@
  	/*
  	 * TODO: optimization is possible here.
  	 * Probably we need not scan at all,
-@@ -2481,7 +2554,7 @@ out_stop:
+@@ -2481,7 +2556,7 @@ out_stop:
  	if (inode->i_nlink)
  		ext4_orphan_del(handle, inode);
  
@@ -2309,7 +2409,7 @@
  	ext4_journal_stop(handle);
  }
  
-@@ -2516,7 +2589,8 @@ int ext4_ext_writepage_trans_blocks(stru
+@@ -2516,7 +2591,8 @@ int ext4_ext_writepage_trans_blocks(stru
  long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
  {
  	handle_t *handle;
@@ -2319,7 +2419,7 @@
  	ext4_fsblk_t nblocks = 0;
  	int ret = 0;
  	int ret2 = 0;
-@@ -2544,6 +2618,7 @@ long ext4_fallocate(struct inode *inode,
+@@ -2544,6 +2620,7 @@ long ext4_fallocate(struct inode *inode,
  	 * modify 1 super block, 1 block bitmap and 1 group descriptor.
  	 */
  	credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
@@ -2327,7 +2427,7 @@
  retry:
  	while (ret >= 0 && ret < max_blocks) {
  		block = block + ret;
-@@ -2557,12 +2632,12 @@ retry:
+@@ -2557,12 +2634,12 @@ retry:
  		ret = ext4_ext_get_blocks(handle, inode, block,
  					  max_blocks, &map_bh,
  					  EXT4_CREATE_UNINITIALIZED_EXT, 0);
@@ -2345,7 +2445,7 @@
  			ret = -EIO;
  			ext4_mark_inode_dirty(handle, inode);
  			ret2 = ext4_journal_stop(handle);
-@@ -2600,6 +2675,7 @@ retry:
+@@ -2600,6 +2677,7 @@ retry:
  	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
  		goto retry;
  
@@ -6421,7 +6521,7 @@
 ===================================================================
 --- /dev/null
 +++ linux-2.6.23.noarch/fs/ext4/mballoc.c
-@@ -0,0 +1,4418 @@
+@@ -0,0 +1,4434 @@
 +/*
 + * Copyright (c) 2003-2006, Cluster File Systems, Inc, info at clusterfs.com
 + * Written by Alex Tomas <alex at clusterfs.com>
@@ -6892,7 +6992,6 @@
 +static void ext4_mb_return_to_preallocation(struct inode *inode,
 +					struct ext4_buddy *e4b, sector_t block,
 +					int count);
-+static void ext4_mb_show_ac(struct ext4_allocation_context *ac);
 +static void ext4_mb_put_pa(struct ext4_allocation_context *, struct super_block *,
 +						struct ext4_prealloc_space *pa);
 +static int ext4_mb_init_per_dev_proc(struct super_block *sb);
@@ -8262,15 +8361,24 @@
 +	if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
 +		goto out;
 +
-+	i = ffs(ac->ac_g_ex.fe_len);
++	/*
++	 * ac->ac2_order is set only if the fe_len is a power of 2
++	 * if ac2_order is set we also set criteria to 0 so that we
++	 * try exact allocation using buddy.
++	 */
++	i = fls(ac->ac_g_ex.fe_len);
 +	ac->ac_2order = 0;
-+	/* FIXME!!
-+	 * What happens if i is still greater than s_mb_order2_reqs
++	/*
++	 * We search using buddy data only if the order of the request
++	 * is greater than equal to the sbi_s_mb_order2_reqs
++	 * You can tune it via /proc/fs/ext4/<partition>/order2_req
 +	 */
 +	if (i >= sbi->s_mb_order2_reqs) {
-+		i--;
-+		if ((ac->ac_g_ex.fe_len & (~(1 << i))) == 0)
-+			ac->ac_2order = i;
++		/*
++		 * This should tell if fe_len is exactly power of 2
++		 */
++		if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
++			ac->ac_2order = i - 1;
 +	}
 +
 +	bsbits = ac->ac_sb->s_blocksize_bits;
@@ -8289,17 +8397,17 @@
 +		spin_unlock(&sbi->s_md_lock);
 +	}
 +
++	/* searching for the right group start from the goal value specified */
 +	group = ac->ac_g_ex.fe_group;
 +
 +	/* Let's just scan groups to find more-less suitable blocks */
 +	cr = ac->ac_2order ? 0 : 1;
++	/*
++	 * cr == 0 try to get exact allocation,
++	 * cr == 3  try to get anything
++	 */
 +repeat:
 +	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
-+		/* FIXME!!
-+		 * We need to explain what criteria is and also
-+		 * need to define the number 0 to 4 for criteria
-+		 * What they actually means.
-+		 */
 +		ac->ac_criteria = cr;
 +		for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
 +			struct ext4_group_info *grp;
@@ -8313,23 +8421,28 @@
 +			if (grp->bb_free == 0)
 +				continue;
 +
++			/*
++			 * if the group is already init we check whether it is
++			 * a good group and if not we don't load the buddy
++			 */
 +			if (EXT4_MB_GRP_NEED_INIT(EXT4_GROUP_INFO(sb, group))) {
-+				/* we need full data about the group
-+				 * to make a good selection */
++				/*
++				 * we need full data about the group
++				 * to make a good selection
++				 */
 +				err = ext4_mb_load_buddy(sb, group, &e4b);
 +				if (err)
 +					goto out;
 +				ext4_mb_release_desc(&e4b);
 +			}
 +
-+			/* check is group good for our criteries */
++			/*
++			 * If the particular group doesn't satisfy our
++			 * criteria we continue with the next group
++			 */
 +			if (!ext4_mb_good_group(ac, group, cr))
 +				continue;
 +
-+			/* FIXME!!
-+			 * here also we are loading the buddy. so what difference
-+			 * does EXT4_MB_GRP_NEED_INIT actually make
-+			 */
 +			err = ext4_mb_load_buddy(sb, group, &e4b);
 +			if (err)
 +				goto out;
@@ -10150,10 +10263,10 @@
 +		busy = 0;
 +		ext4_unlock_group(sb, group);
 +		/*
-+		 * We see this quiet rare. But if a particular workload is
-+		 * effected by this we may need to add a waitqueue
++		 * Yield the CPU here so that we don't get soft lockup
++		 * in non preempt case.
 +		 */
-+		schedule_timeout(HZ);
++		yield();
 +		goto repeat;
 +	}
 +
@@ -10306,7 +10419,7 @@
 +{
 +	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
 +}
-+
++#ifdef MB_DEBUG
 +static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 +{
 +	struct super_block *sb = ac->ac_sb;
@@ -10356,6 +10469,9 @@
 +	}
 +	printk(KERN_ERR "\n");
 +}
++#else
++#define ext4_mb_show_ac(x)
++#endif
 +
 +/*
 + * We use locality group preallocation for small size file. The size of the



