[PATCH 1/4] fs, afs: convert afs_cell.usage from atomic_t to refcount_t

Reshetova, Elena elena.reshetova at intel.com
Thu Feb 23 05:32:10 PST 2017


> On Wed, Feb 22, 2017 at 9:29 AM, David Howells <dhowells at redhat.com>
> wrote:
> > Reshetova, Elena <elena.reshetova at intel.com> wrote:
> >
> >> Thank you very much, David, for testing the patches!
> >> I guess for this one and the other two patches it means that if we want
> >> to do the atomic_t --> refcount_t conversions, we need to do +1 on the
> >> whole counting scheme to avoid issues around reaching zero.
> >> Do you see this approach as reasonable? I can give it a try, if it makes
> >> sense in your opinion.
> >
> > Or you could create a refcount_inc_may_resurrect() function that does allow
> > increment from 0.  Make it take a lock-check like the rcu functions do.
> 
> We can't allow the increment from 0 since it violates the intended
> use-after-free protections. If "0" means "still valid" then this
> sounds like it needs a global +1, as Elena suggested in her reply.
> 
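
To make the "+1 on the whole counting scheme" idea concrete, here is a minimal
sketch (struct obj and the obj_*() helpers are hypothetical, not fs/afs code):
refcount_t treats 0 as "object already freed" and will not increment from it,
so the counter carries a permanent +1 bias (the cell list's implicit
reference), and a value of 1, rather than 0, means "no users left".

#include <linux/refcount.h>

/* Hypothetical example object, for illustration only. */
struct obj {
	refcount_t	usage;
};

static void obj_init(struct obj *o)
{
	/* 1 for the creator plus the +1 bias held by the cache/list. */
	refcount_set(&o->usage, 2);
}

static void obj_get(struct obj *o)
{
	/* The bias keeps usage >= 1, so we never increment from zero. */
	refcount_inc(&o->usage);
}

static bool obj_put(struct obj *o)
{
	/* In the patch below, the dec and the read run under a lock;
	 * usage == 1 means only the bias reference remains. */
	refcount_dec(&o->usage);
	return refcount_read(&o->usage) == 1;
}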

I think the modified version of this patch below should do it correctly.
Note: as I understand it, we can safely split the refcount_dec() from the
subsequent refcount_read() in afs_put_cell(), because both happen under
write_lock(&afs_cells_lock).
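
Spelling that out (a sketch of the entry of afs_put_cell() as modified in the
patch below; the safety argument assumes that any code that could take or drop
a reference on a cell whose count is down to the bias also has to go through
afs_cells_lock):

	write_lock(&afs_cells_lock);

	refcount_dec(&cell->usage);

	/* The counter is biased by +1, so "> 1" means other users still
	 * hold real references; just drop ours and leave. */
	if (likely(refcount_read(&cell->usage) > 1)) {
		write_unlock(&afs_cells_lock);
		_leave("");
		return;
	}

	/* Only the bias is left.  Under the assumption above, nothing can
	 * change the counter between the decrement and the dequeue that
	 * follows, because we still hold the lock. */
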
-----------------------

The refcount_t type and its corresponding API should be
used instead of atomic_t when the variable is used as
a reference counter. This prevents accidental refcounter
overflows that might lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova at intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel at gmail.com>
Signed-off-by: Kees Cook <keescook at chromium.org>
Signed-off-by: David Windsor <dwindsor at gmail.com>
---
 fs/afs/cell.c     | 24 +++++++++++++-----------
 fs/afs/internal.h |  4 ++--
 fs/afs/proc.c     |  2 +-
 3 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index ca0a3cf..a39410f 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -60,7 +60,7 @@ static struct afs_cell *afs_cell_alloc(const char *name, unsigned namelen,
 	memcpy(cell->name, name, namelen);
 	cell->name[namelen] = 0;
 
-	atomic_set(&cell->usage, 1);
+	refcount_set(&cell->usage, 2);
 	INIT_LIST_HEAD(&cell->link);
 	rwlock_init(&cell->servers_lock);
 	INIT_LIST_HEAD(&cell->servers);
@@ -345,15 +345,17 @@ void afs_put_cell(struct afs_cell *cell)
 	if (!cell)
 		return;
 
-	_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
+	_enter("%p{%d,%s}", cell, refcount_read(&cell->usage), cell->name);
 
-	ASSERTCMP(atomic_read(&cell->usage), >, 0);
+	ASSERTCMP(refcount_read(&cell->usage), >, 1);
 
 	/* to prevent a race, the decrement and the dequeue must be effectively
 	 * atomic */
 	write_lock(&afs_cells_lock);
 
-	if (likely(!atomic_dec_and_test(&cell->usage))) {
+	refcount_dec(&cell->usage);
+
+	if (likely(refcount_read(&cell->usage) > 1)) {
 		write_unlock(&afs_cells_lock);
 		_leave("");
 		return;
@@ -376,20 +378,20 @@ void afs_put_cell(struct afs_cell *cell)
  */
 static void afs_cell_destroy(struct afs_cell *cell)
 {
-	_enter("%p{%d,%s}", cell, atomic_read(&cell->usage), cell->name);
+	_enter("%p{%d,%s}", cell, refcount_read(&cell->usage), cell->name);
 
-	ASSERTCMP(atomic_read(&cell->usage), >=, 0);
+	ASSERTCMP(refcount_read(&cell->usage), >=, 1);
 	ASSERT(list_empty(&cell->link));
 
 	/* wait for everyone to stop using the cell */
-	if (atomic_read(&cell->usage) > 0) {
+	if (refcount_read(&cell->usage) > 1) {
 		DECLARE_WAITQUEUE(myself, current);
 
 		_debug("wait for cell %s", cell->name);
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		add_wait_queue(&afs_cells_freeable_wq, &myself);
 
-		while (atomic_read(&cell->usage) > 0) {
+		while (refcount_read(&cell->usage) > 1) {
 			schedule();
 			set_current_state(TASK_UNINTERRUPTIBLE);
 		}
@@ -399,7 +401,7 @@ static void afs_cell_destroy(struct afs_cell *cell)
 	}
 
 	_debug("cell dead");
-	ASSERTCMP(atomic_read(&cell->usage), ==, 0);
+	ASSERTCMP(refcount_read(&cell->usage), ==, 1);
 	ASSERT(list_empty(&cell->servers));
 	ASSERT(list_empty(&cell->vl_list));
 
@@ -447,8 +449,8 @@ void afs_cell_purge(void)
 		write_unlock(&afs_cells_lock);
 
 		if (cell) {
-			_debug("PURGING CELL %s (%d)",
-			       cell->name, atomic_read(&cell->usage));
+			_debug("PURGING CELL %s (%u)",
+			       cell->name, refcount_read(&cell->usage));
 
 			/* now the cell should be left with no references */
 			afs_cell_destroy(cell);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 8acf367..e77fd4d 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -192,7 +192,7 @@ struct afs_cache_cell {
  * AFS cell record
  */
 struct afs_cell {
-	atomic_t		usage;
+	refcount_t		usage;
 	struct list_head	link;		/* main cell list link */
 	struct key		*anonymous_key;	/* anonymous user key for this cell */
 	struct list_head	proc_link;	/* /proc cell list link */
@@ -445,7 +445,7 @@ extern void afs_callback_update_kill(void);
 extern struct rw_semaphore afs_proc_cells_sem;
 extern struct list_head afs_proc_cells;
 
-#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
+#define afs_get_cell(C) do { refcount_inc(&(C)->usage); } while(0)
 extern int afs_cell_init(char *);
 extern struct afs_cell *afs_cell_create(const char *, unsigned, char *, bool);
 extern struct afs_cell *afs_cell_lookup(const char *, unsigned, bool);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 35efb9a..7eec16d 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -212,7 +212,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
 
 	/* display one cell per line on subsequent lines */
 	seq_printf(m, "%3d %s\n",
-		   atomic_read(&cell->usage), cell->name);
+		   refcount_read(&cell->usage), cell->name);
 	return 0;
 }
 
-- 
2.7.4


