[Linux-cachefs] [PATCH] fscache: Convert from atomic_t to refcount_t on fscache_operation->usage

Xiyu Yang xiyuyang19 at fudan.edu.cn
Mon Jul 19 08:31:53 UTC 2021


The refcount_t type and its corresponding API protect reference counters
from accidental underflow and overflow, and from the use-after-free bugs
that can follow. Convert fscache_operation->usage from atomic_t to
refcount_t accordingly.

Signed-off-by: Xiyu Yang <xiyuyang19 at fudan.edu.cn>
Signed-off-by: Xin Tan <tanxin.ctf at gmail.com>
---
 fs/fscache/operation.c        | 38 +++++++++++++++++++-------------------
 include/linux/fscache-cache.h |  3 ++-
 2 files changed, 21 insertions(+), 20 deletions(-)
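
As an aside (not part of the commit): the get/put pattern this conversion
relies on can be sketched as below. struct foo and its helpers are
hypothetical and only illustrate how the refcount_t API behaves where
atomic_t would proceed silently:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct foo {
            refcount_t usage;
    };

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (f)
                    refcount_set(&f->usage, 1);     /* initial reference */
            return f;
    }

    static void foo_get(struct foo *f)
    {
            /* Saturates and WARNs instead of wrapping past the maximum,
             * and WARNs if the counter is already 0 (use-after-free). */
            refcount_inc(&f->usage);
    }

    static void foo_put(struct foo *f)
    {
            /* WARNs on underflow instead of going negative, which with
             * atomic_t could lead to a double free. */
            if (refcount_dec_and_test(&f->usage))
                    kfree(f);
    }

With atomic_t, both the overflow and underflow cases would go unnoticed;
refcount_t turns them into saturation plus a warning.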

diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 4a5651d4904e..619ed21d24e7 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -35,7 +35,7 @@ void fscache_operation_init(struct fscache_cookie *cookie,
 			    fscache_operation_release_t release)
 {
 	INIT_WORK(&op->work, fscache_op_work_func);
-	atomic_set(&op->usage, 1);
+	refcount_set(&op->usage, 1);
 	op->state = FSCACHE_OP_ST_INITIALISED;
 	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
 	op->processor = processor;
@@ -60,12 +60,12 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	struct fscache_cookie *cookie = op->object->cookie;
 	
 	_enter("{OBJ%x OP%x,%u}",
-	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+	       op->object->debug_id, op->debug_id, refcount_read(&op->usage));
 
 	ASSERT(list_empty(&op->pend_link));
 	ASSERT(op->processor != NULL);
 	ASSERT(fscache_object_is_available(op->object));
-	ASSERTCMP(atomic_read(&op->usage), >, 0);
+	ASSERTCMP(refcount_read(&op->usage), >, 0);
 	ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
 		    op->state, ==,  FSCACHE_OP_ST_CANCELLED);
 
@@ -74,7 +74,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 	case FSCACHE_OP_ASYNC:
 		trace_fscache_op(cookie, op, fscache_op_enqueue_async);
 		_debug("queue async");
-		atomic_inc(&op->usage);
+		refcount_inc(&op->usage);
 		if (!queue_work(fscache_op_wq, &op->work))
 			fscache_put_operation(op);
 		break;
@@ -163,7 +163,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 	trace_fscache_op(object->cookie, op, fscache_op_submit_ex);
 
 	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
-	ASSERTCMP(atomic_read(&op->usage), >, 0);
+	ASSERTCMP(refcount_read(&op->usage), >, 0);
 
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
@@ -190,11 +190,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 		object->n_exclusive++;	/* reads and writes must wait */
 
 		if (object->n_in_progress > 0) {
-			atomic_inc(&op->usage);
+			refcount_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
 			fscache_stat(&fscache_n_op_pend);
 		} else if (!list_empty(&object->pending_ops)) {
-			atomic_inc(&op->usage);
+			refcount_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
 			fscache_stat(&fscache_n_op_pend);
 			fscache_start_operations(object);
@@ -210,7 +210,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 		op->object = object;
 		object->n_ops++;
 		object->n_exclusive++;	/* reads and writes must wait */
-		atomic_inc(&op->usage);
+		refcount_inc(&op->usage);
 		list_add_tail(&op->pend_link, &object->pending_ops);
 		fscache_stat(&fscache_n_op_pend);
 		ret = 0;
@@ -245,12 +245,12 @@ int fscache_submit_op(struct fscache_object *object,
 	int ret;
 
 	_enter("{OBJ%x OP%x},{%u}",
-	       object->debug_id, op->debug_id, atomic_read(&op->usage));
+	       object->debug_id, op->debug_id, refcount_read(&op->usage));
 
 	trace_fscache_op(object->cookie, op, fscache_op_submit);
 
 	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
-	ASSERTCMP(atomic_read(&op->usage), >, 0);
+	ASSERTCMP(refcount_read(&op->usage), >, 0);
 
 	spin_lock(&object->lock);
 	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
@@ -276,11 +276,11 @@ int fscache_submit_op(struct fscache_object *object,
 		object->n_ops++;
 
 		if (object->n_exclusive > 0) {
-			atomic_inc(&op->usage);
+			refcount_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
 			fscache_stat(&fscache_n_op_pend);
 		} else if (!list_empty(&object->pending_ops)) {
-			atomic_inc(&op->usage);
+			refcount_inc(&op->usage);
 			list_add_tail(&op->pend_link, &object->pending_ops);
 			fscache_stat(&fscache_n_op_pend);
 			fscache_start_operations(object);
@@ -292,7 +292,7 @@ int fscache_submit_op(struct fscache_object *object,
 	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
 		op->object = object;
 		object->n_ops++;
-		atomic_inc(&op->usage);
+		refcount_inc(&op->usage);
 		list_add_tail(&op->pend_link, &object->pending_ops);
 		fscache_stat(&fscache_n_op_pend);
 		ret = 0;
@@ -370,7 +370,7 @@ int fscache_cancel_op(struct fscache_operation *op,
 
 	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
 	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
-	ASSERTCMP(atomic_read(&op->usage), >, 0);
+	ASSERTCMP(refcount_read(&op->usage), >, 0);
 
 	spin_lock(&object->lock);
 
@@ -497,11 +497,11 @@ void fscache_put_operation(struct fscache_operation *op)
 
 	_enter("{OBJ%x OP%x,%d}",
 	       op->object ? op->object->debug_id : 0,
-	       op->debug_id, atomic_read(&op->usage));
+	       op->debug_id, refcount_read(&op->usage));
 
-	ASSERTCMP(atomic_read(&op->usage), >, 0);
+	ASSERTCMP(refcount_read(&op->usage), >, 0);
 
-	if (!atomic_dec_and_test(&op->usage))
+	if (!refcount_dec_and_test(&op->usage))
 		return;
 
 	trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);
@@ -589,7 +589,7 @@ void fscache_operation_gc(struct work_struct *work)
 		       object->debug_id, op->debug_id);
 		fscache_stat(&fscache_n_op_gc);
 
-		ASSERTCMP(atomic_read(&op->usage), ==, 0);
+		ASSERTCMP(refcount_read(&op->usage), ==, 0);
 		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
 
 		ASSERTCMP(object->n_ops, >, 0);
@@ -619,7 +619,7 @@ void fscache_op_work_func(struct work_struct *work)
 	unsigned long start;
 
 	_enter("{OBJ%x OP%x,%d}",
-	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+	       op->object->debug_id, op->debug_id, refcount_read(&op->usage));
 
 	trace_fscache_op(op->object->cookie, op, fscache_op_work);
 
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 3235ddbdcc09..9fddf7817948 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -14,6 +14,7 @@
 #ifndef _LINUX_FSCACHE_CACHE_H
 #define _LINUX_FSCACHE_CACHE_H
 
+#include <linux/refcount.h>
 #include <linux/fscache.h>
 #include <linux/sched.h>
 #include <linux/workqueue.h>
@@ -110,7 +111,7 @@ struct fscache_operation {
 #define FSCACHE_OP_KEEP_FLAGS	0x00f0	/* flags to keep when repurposing an op */
 
 	enum fscache_operation_state state;
-	atomic_t		usage;
+	refcount_t		usage;
 	unsigned		debug_id;	/* debugging ID */
 
 	/* operation processor callback
-- 
2.7.4