[Cluster-devel] [PATCH] GFS2: reread rindex when necessary to grow rindex

Steven Whitehouse swhiteho at redhat.com
Wed Nov 24 16:32:09 UTC 2010


Hi,

This doesn't apply to the current -nmw tree, but otherwise it looks
good,

Steve.

On Tue, 2010-11-23 at 23:51 -0600, Benjamin Marzinski wrote:
> Previously, when GFS2 grew the filesystem, it never reread the rindex file
> during the grow. Rereading is necessary for large grows when the filesystem
> is almost full,
> and GFS2 needs to use some of the space allocated earlier in the grow to
> complete it.  Now, if GFS2 fails to reserve the necessary space and the rindex
> file is not up to date, it rereads it.  Also, the only difference between
> gfs2_ri_update() and gfs2_ri_update_special() was that gfs2_ri_update_special()
> didn't clear out the existing resource groups, since it was known to be
> called only when there were no resource groups.  Attempting to clear out the
> resource groups when there are none takes almost no time, and rarely happens,
> so I simply removed gfs2_ri_update_special().
> 
> Signed-off-by: Benjamin Marzinski <bmarzins at redhat.com>
> ---
>  fs/gfs2/rgrp.c |   53 +++++++++++------------------------------------------
>  1 file changed, 11 insertions(+), 42 deletions(-)
> 
> Index: gfs2-2.6-nmw/fs/gfs2/rgrp.c
> ===================================================================
> --- gfs2-2.6-nmw.orig/fs/gfs2/rgrp.c
> +++ gfs2-2.6-nmw/fs/gfs2/rgrp.c
> @@ -614,46 +614,6 @@ static int gfs2_ri_update(struct gfs2_in
>  }
>  
>  /**
> - * gfs2_ri_update_special - Pull in a new resource index from the disk
> - *
> - * This is a special version that's safe to call from gfs2_inplace_reserve_i.
> - * In this case we know that we don't have any resource groups in memory yet.
> - *
> - * @ip: pointer to the rindex inode
> - *
> - * Returns: 0 on successful update, error code otherwise
> - */
> -static int gfs2_ri_update_special(struct gfs2_inode *ip)
> -{
> -	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
> -	struct inode *inode = &ip->i_inode;
> -	struct file_ra_state ra_state;
> -	struct gfs2_rgrpd *rgd;
> -	unsigned int max_data = 0;
> -	int error;
> -
> -	file_ra_state_init(&ra_state, inode->i_mapping);
> -	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
> -		/* Ignore partials */
> -		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
> -		    i_size_read(inode))
> -			break;
> -		error = read_rindex_entry(ip, &ra_state);
> -		if (error) {
> -			clear_rgrpdi(sdp);
> -			return error;
> -		}
> -	}
> -	list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list)
> -		if (rgd->rd_data > max_data)
> -			max_data = rgd->rd_data;
> -	sdp->sd_max_rg_data = max_data;
> -
> -	sdp->sd_rindex_uptodate = 1;
> -	return 0;
> -}
> -
> -/**
>   * gfs2_rindex_hold - Grab a lock on the rindex
>   * @sdp: The GFS2 superblock
>   * @ri_gh: the glock holder
> @@ -1205,6 +1165,7 @@ int gfs2_inplace_reserve_i(struct gfs2_i
>  	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
>  	struct gfs2_alloc *al = ip->i_alloc;
>  	int error = 0;
> +	int reload_rindex = 0;
>  	u64 last_unlinked = NO_BLOCK, unlinked;
>  
>  	if (gfs2_assert_warn(sdp, al->al_requested))
> @@ -1215,8 +1176,12 @@ try_again:
>  	   the rindex itself, in which case it's already held. */
>  	if (ip != GFS2_I(sdp->sd_rindex))
>  		error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
> -	else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
> -		error = gfs2_ri_update_special(ip);
> +	else if (!sdp->sd_rgrps || reload_rindex) {
> +		/* We need more space, and the rindex is stale.
> +		   reread it */
> +		error = gfs2_ri_update(ip);
> +		reload_rindex = 0;
> +	}
>  
>  	if (error)
>  		return error;
> @@ -1229,6 +1194,10 @@ try_again:
>  	if (error) {
>  		if (ip != GFS2_I(sdp->sd_rindex))
>  			gfs2_glock_dq_uninit(&al->al_ri_gh);
> +		else if (!sdp->sd_rindex_uptodate) {
> +			reload_rindex = 1;
> +			goto try_again;
> +		}
>  		if (error != -EAGAIN)
>  			return error;
>  
> 





More information about the Cluster-devel mailing list