[Cluster-devel] [PATCH 1/2] gfs2: Pass write offset to gfs2_write_calc_reserv

Andreas Gruenbacher agruenba at redhat.com
Fri Jul 13 16:18:23 UTC 2018


Pass the offset of the write to gfs2_write_calc_reserv so that we can
compute a tighter upper bound on the number of indirect blocks
required.

Signed-off-by: Andreas Gruenbacher <agruenba at redhat.com>
---
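Note below the fold (not part of the commit message): the point of
threading the offset through is that a reservation based on the length
alone has to assume the worst case for how the write lines up with the
filesystem blocks and with the metadata tree. Once the starting offset
is known, the number of blocks the range [pos, pos + len) actually
touches can be bounded much more tightly. The stand-alone sketch below
only illustrates that idea with made-up constants; it is not the gfs2
calculation:

#include <stdint.h>
#include <stdio.h>

#define BSIZE_SHIFT	12	/* assumed 4k filesystem blocks */
#define INPTRS		509	/* assumed pointers per indirect block */
#define HEIGHT		5	/* assumed metadata tree height */

static void calc_reserv(uint64_t pos, unsigned int len,
			unsigned int *data_blocks, unsigned int *ind_blocks)
{
	/* Exact number of data blocks the range [pos, pos + len) touches. */
	uint64_t first = pos >> BSIZE_SHIFT;
	uint64_t last = (pos + len - 1) >> BSIZE_SHIFT;
	unsigned int h;

	*data_blocks = last - first + 1;
	*ind_blocks = 0;

	/*
	 * At each level of the tree, the touched data blocks map onto a
	 * contiguous range of indirect blocks; summing the sizes of those
	 * ranges gives a bound that depends on pos, not just on len.
	 */
	for (h = 1; h < HEIGHT; h++) {
		first /= INPTRS;
		last /= INPTRS;
		*ind_blocks += last - first + 1;
	}
}

int main(void)
{
	unsigned int data_blocks, ind_blocks;

	calc_reserv(1000ULL << BSIZE_SHIFT, 8192, &data_blocks, &ind_blocks);
	printf("data_blocks=%u ind_blocks=%u\n", data_blocks, ind_blocks);
	return 0;
}

In this patch the new argument is only passed through; the calculation
itself is left unchanged.
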
 fs/gfs2/bmap.c  |  4 ++--
 fs/gfs2/bmap.h  |  2 ++
 fs/gfs2/file.c  | 12 ++++++------
 fs/gfs2/quota.c | 30 +++++++++++++++++-------------
 4 files changed, 27 insertions(+), 21 deletions(-)
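
Also for reference, a worked example of the do_sync() accounting change
in the quota.c hunks below (the per-entry numbers are illustrative
only): suppose num_qd = 4 quota entries are synced and 2 of them need
allocation, with a per-entry estimate of data_blocks = 1 and
ind_blocks = 3. The old code computed one estimate up front and used
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3 together with
reserved = 1 + nalloc * (data_blocks + ind_blocks) = 1 + 2 * 4 = 9.
The new code sums the per-entry estimates only over the entries that
actually need allocation, giving data_blocks = 2 and ind_blocks = 6, so
reserved = 1 + 2 + 6 = 9 as before, while the transaction size now
counts 2 data blocks instead of num_qd * data_blocks = 4. Once the
estimate takes the offset into account, the per-entry numbers
themselves can shrink as well.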

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 89f1f7d3186d..7d3bb327f8b7 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1003,8 +1003,8 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 	alloc_required = unstuff || iomap->type == IOMAP_HOLE;
 
 	if (alloc_required || gfs2_is_jdata(ip))
-		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
-				       &ind_blocks);
+		gfs2_write_calc_reserv(ip, iomap->offset, iomap->length,
+				       &data_blocks, &ind_blocks);
 
 	if (alloc_required) {
 		struct gfs2_alloc_parms ap = {
diff --git a/fs/gfs2/bmap.h b/fs/gfs2/bmap.h
index 6b18fb323f0a..64970536c7d6 100644
--- a/fs/gfs2/bmap.h
+++ b/fs/gfs2/bmap.h
@@ -22,6 +22,7 @@ struct page;
 /**
  * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
  * @ip: the file
+ * @pos: file offset of the write
  * @len: the number of bytes to be written to the file
  * @data_blocks: returns the number of data blocks required
  * @ind_blocks: returns the number of indirect blocks required
@@ -29,6 +30,7 @@ struct page;
  */
 
 static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
+					  u64 pos,
 					  unsigned int len,
 					  unsigned int *data_blocks,
 					  unsigned int *ind_blocks)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 08369c6cd127..93f59f9eecbd 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -435,7 +435,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	if (ret)
 		goto out_unlock;
 
-	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+	gfs2_write_calc_reserv(ip, pos, PAGE_SIZE, &data_blocks, &ind_blocks);
 	ap.target = data_blocks + ind_blocks;
 	ret = gfs2_quota_lock_check(ip, &ap);
 	if (ret)
@@ -918,7 +918,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
  *
  * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
  */
-static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t pos, loff_t *len,
 			    unsigned int *data_blocks, unsigned int *ind_blocks,
 			    unsigned int max_blocks)
 {
@@ -936,7 +936,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
 	if (*len > max) {
 		*len = max;
-		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
+		gfs2_write_calc_reserv(ip, pos, max, data_blocks, ind_blocks);
 	}
 }
 
@@ -969,7 +969,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 
 	gfs2_size_hint(file, offset, len);
 
-	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+	gfs2_write_calc_reserv(ip, offset, PAGE_SIZE, &data_blocks, &ind_blocks);
 	ap.min_target = data_blocks + ind_blocks;
 
 	while (len > 0) {
@@ -991,7 +991,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 		 * calculate a more realistic 'bytes' to serve as a good
 		 * starting point for the number of bytes we may be able
 		 * to write */
-		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+		gfs2_write_calc_reserv(ip, offset, bytes, &data_blocks, &ind_blocks);
 		ap.target = data_blocks + ind_blocks;
 
 		error = gfs2_quota_lock_check(ip, &ap);
@@ -1014,7 +1014,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 		/* Almost done. Calculate bytes that can be written using
 		 * max_blks. We also recompute max_bytes, data_blocks and
 		 * ind_blocks */
-		calc_max_reserv(ip, &max_bytes, &data_blocks,
+		calc_max_reserv(ip, offset, &max_bytes, &data_blocks,
 				&ind_blocks, max_blks);
 
 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 0efae7a0ee80..0b9d7328ecbc 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -870,22 +870,19 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
-	unsigned int data_blocks, ind_blocks;
+	unsigned int data_blocks = 0, ind_blocks = 0;
 	struct gfs2_holder *ghs, i_gh;
 	unsigned int qx, x;
 	struct gfs2_quota_data *qd;
 	unsigned reserved;
 	loff_t offset;
-	unsigned int nalloc = 0, blocks;
+	unsigned int blocks;
 	int error;
 
 	error = gfs2_rsqa_alloc(ip);
 	if (error)
 		return error;
 
-	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
-			      &data_blocks, &ind_blocks);
-
 	ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
 	if (!ghs)
 		return -ENOMEM;
@@ -904,10 +901,17 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		goto out;
 
 	for (x = 0; x < num_qd; x++) {
+		unsigned int d, i;
+
 		offset = qd2offset(qda[x]);
-		if (gfs2_write_alloc_required(ip, offset,
-					      sizeof(struct gfs2_quota)))
-			nalloc++;
+		if (!gfs2_write_alloc_required(ip, offset,
+					       sizeof(struct gfs2_quota)))
+			continue;
+
+		gfs2_write_calc_reserv(ip, offset, sizeof(struct gfs2_quota),
+				       &d, &i);
+		data_blocks += d;
+		ind_blocks += i;
 	}
 
 	/* 
@@ -919,16 +923,16 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	/* +3 in the end for unstuffing block, inode size update block
 	 * and another block in case quota straddles page boundary and 
 	 * two blocks need to be updated instead of 1 */
-	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
+	blocks = data_blocks + RES_DINODE + num_qd + 3;
 
-	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
+	reserved = 1 + data_blocks + ind_blocks;
 	ap.target = reserved;
 	error = gfs2_inplace_reserve(ip, &ap);
 	if (error)
 		goto out_alloc;
 
-	if (nalloc)
-		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;
+	if (ind_blocks)
+		blocks += gfs2_rg_blocks(ip, reserved) + ind_blocks + RES_STATFS;
 
 	error = gfs2_trans_begin(sdp, blocks, 0);
 	if (error)
@@ -1716,7 +1720,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
 		alloc_required = 1;
 	if (alloc_required) {
 		struct gfs2_alloc_parms ap = { .aflags = 0, };
-		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
+		gfs2_write_calc_reserv(ip, offset, sizeof(struct gfs2_quota),
 				       &data_blocks, &ind_blocks);
 		blocks = 1 + data_blocks + ind_blocks;
 		ap.target = blocks;
-- 
2.17.1