[Cluster-devel] [RHEL5 gfs-kmod patch] GFS: Write vfs inode attributes to disk in gfs_inode_write

Steven Whitehouse swhiteho at redhat.com
Wed Mar 26 09:01:18 UTC 2014


Hi,

On Tue, 2014-03-25 at 15:32 -0500, Benjamin Marzinski wrote:
> GFS was never updating the mtime of files written through mmap.  This patch
> makes gfs_write_inode write the vfs inode attributes out to disk, so that
> they get updated when the mmapped data is written back to disk.
> 
> Signed-off-by: Benjamin Marzinski <bmarzins at redhat.com>

Looks good to me. ACK,

Steve.

> ---
> diff --git a/gfs-kernel/src/gfs/ops_super.c b/gfs-kernel/src/gfs/ops_super.c
> index c111a2e..0d17735 100644
> --- a/gfs-kernel/src/gfs/ops_super.c
> +++ b/gfs-kernel/src/gfs/ops_super.c
> @@ -39,6 +39,7 @@
>  #include "super.h"
>  #include "sys.h"
>  #include "mount.h"
> +#include "trans.h"
>  
>  /**
>   * gfs_write_inode - Make sure the inode is stable on the disk
> @@ -51,14 +52,50 @@
>  static int
>  gfs_write_inode(struct inode *inode, int sync)
>  {
> +	int ret = 0;
>  	struct gfs_inode *ip = get_v2ip(inode);
> +	struct buffer_head *dibh;
> +	struct gfs_holder i_gh;
> +	int need_unlock = 0;
> +
> +	if (!ip)
> +		return 0;
>  
>  	atomic_inc(&ip->i_sbd->sd_ops_super);
>  
> -	if (ip && sync)
> +	if (current->flags & PF_MEMALLOC)
> +		goto do_flush;
> +
> +	if (!gfs_glock_is_locked_by_me(ip->i_gl)) {
> +		ret = gfs_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
> +		if (ret)
> +			goto do_flush;
> +		need_unlock = 1;
> +	}
> +	/* Trans may require:
> +	   one dinode block. */
> +	ret = gfs_trans_begin(ip->i_sbd, 1, 0);
> +	if (ret)
> +		goto do_unlock;
> +
> +	ret = gfs_get_inode_buffer(ip, &dibh);
> +	if (ret == 0) {
> +		gfs_inode_attr_out(ip);
> +		gfs_trans_add_bh(ip->i_gl, dibh);
> +		gfs_dinode_out(&ip->i_di, dibh->b_data);
> +		brelse(dibh);
> +	}
> +
> +	gfs_trans_end(ip->i_sbd);
> +
> +do_unlock:
> +	if (need_unlock)
> +		gfs_glock_dq_uninit(&i_gh);
> +do_flush:
> +	if (sync)
>  		gfs_log_flush_glock(ip->i_gl, 0);
>  
> -	return 0;
> +	return ret;
>  }
>  
>  /**
> 
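For anyone who wants to exercise this path, here is a rough userspace
reproducer sketch (untested; the mount point, file name and sizes below are
made up for illustration).  The idea is to dirty a file through mmap() only,
msync() it, and then compare the mtime seen from a second cluster node (or
after a remount) against the local one.  Without the patch the updated
timestamp never reaches the on-disk dinode, so the other node keeps showing
the stale value.

/*
 * Sketch of a reproducer: dirty a GFS file through a shared mapping only,
 * so the regular write() path never touches the dinode, then sync and
 * print the local mtime.  Path below is an example, not from the patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/gfs/mmap-mtime-test";	/* example path */
	struct stat st;
	char *map;
	int fd;

	fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, 4096) < 0) {	/* make sure one page exists */
		perror("ftruncate");
		return 1;
	}

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Dirty the page through the mapping only; no write() call. */
	memcpy(map, "hello", 5);

	/* Flush the dirty page; with the patch this also pushes the
	   updated inode attributes out via gfs_write_inode. */
	if (msync(map, 4096, MS_SYNC) < 0)
		perror("msync");
	munmap(map, 4096);
	fsync(fd);
	close(fd);

	if (stat(path, &st) == 0)
		printf("local mtime after msync: %ld\n", (long)st.st_mtime);
	/* Compare against `stat` run on another node, or after dropping
	   caches / remounting, to see what actually made it to disk. */
	return 0;
}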
