commit 8aca67f0ae2d8811165c22326825a645cc8e1b48
tree 19e82f4bc7b4f865a9dcf4744e7c224ea517ba10
parent e6b3c4db6fbcd0d33720696f37790d6b8be12313
author Trond Myklebust <Trond.Myklebust@netapp.com> 2006-11-13 16:23:44 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com> 2006-12-06 10:46:26 -0500
SUNRPC: Fix a potential race in rpc_wake_up_task()
Use RCU to ensure that we can safely call rpc_finish_wakeup after we've
called __rpc_do_wake_up_task. Without this there is a theoretical race in
which the rpc_task finishes executing and gets freed first.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
 fs/nfs/read.c                |  8
 fs/nfs/write.c               | 20
 include/linux/nfs_fs.h       |  7
 include/linux/sunrpc/sched.h |  2
 net/sunrpc/sched.c           | 30
 5 files changed, 47 insertions(+), 20 deletions(-)
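
For context, the race and the shape of the fix can be sketched as follows.
This is an editorial illustration reconstructed from the commit message and
the diff below, not code taken from the patch itself:

/*
 * The race (timeline):
 *
 *   CPU 0 (waker)                       CPU 1 (task)
 *   rpc_start_wakeup(task)
 *   __rpc_do_wake_up_task(task)  ---->  task wakes, runs to completion,
 *                                       and is freed
 *   rpc_finish_wakeup(task)             <-- use after free
 *
 * The fix: defer the actual mempool_free()/kfree() behind an RCU-bh
 * grace period and wrap the waker in an rcu_read_lock_bh() section,
 * so the task's memory cannot be reclaimed while a waker still holds
 * a reference into it.
 */
rcu_read_lock_bh();                     /* pins the task's memory */
if (rpc_start_wakeup(task)) {
        /* ... wake the task ... */
        rpc_finish_wakeup(task);        /* safe: free is RCU-deferred */
}
rcu_read_unlock_bh();
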
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c2e49c397a27..8b58bbf6e39e 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -65,13 +65,19 @@ struct nfs_read_data *nfs_readdata_alloc(size_t len)
 	return p;
 }
 
-static void nfs_readdata_free(struct nfs_read_data *p)
+static void nfs_readdata_rcu_free(struct rcu_head *head)
 {
+	struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_rdata_mempool);
 }
 
+static void nfs_readdata_free(struct nfs_read_data *rdata)
+{
+	call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
+}
+
 void nfs_readdata_release(void *data)
 {
         nfs_readdata_free(data);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 883dd4a1c157..29d88209199d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -102,13 +102,19 @@ struct nfs_write_data *nfs_commit_alloc(void)
 	return p;
 }
 
-void nfs_commit_free(struct nfs_write_data *p)
+void nfs_commit_rcu_free(struct rcu_head *head)
 {
+	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_commit_mempool);
 }
 
+void nfs_commit_free(struct nfs_write_data *wdata)
+{
+	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
+}
+
 struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
 	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -131,13 +137,19 @@ struct nfs_write_data *nfs_writedata_alloc(size_t len)
 	return p;
 }
 
-static void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_rcu_free(struct rcu_head *head)
 {
+	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
 	mempool_free(p, nfs_wdata_mempool);
 }
 
+static void nfs_writedata_free(struct nfs_write_data *wdata)
+{
+	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
+}
+
 void nfs_writedata_release(void *wdata)
 {
 	nfs_writedata_free(wdata);
@@ -258,7 +270,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
 io_error:
 	nfs_end_data_update(inode);
 	end_page_writeback(page);
-	nfs_writedata_free(wdata);
+	nfs_writedata_release(wdata);
 	return written ? written : result;
 }
 
@@ -1043,7 +1055,7 @@ out_bad:
 	while (!list_empty(&list)) {
 		data = list_entry(list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
-		nfs_writedata_free(data);
+		nfs_writedata_release(data);
 	}
 	nfs_mark_request_dirty(req);
 	nfs_clear_page_writeback(req);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 625ffea98561..02f38189d180 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -428,11 +428,6 @@ extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
 extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
 extern void nfs_writedata_release(void *);
 
-#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-struct nfs_write_data *nfs_commit_alloc(void);
-void nfs_commit_free(struct nfs_write_data *p);
-#endif
-
 /*
  * Try to write back everything synchronously (but check the
  * return value!)
@@ -440,6 +435,8 @@ void nfs_commit_free(struct nfs_write_data *p);
 extern int  nfs_sync_inode_wait(struct inode *, unsigned long, unsigned int, int);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 extern int  nfs_commit_inode(struct inode *, int);
+extern struct nfs_write_data *nfs_commit_alloc(void);
+extern void nfs_commit_free(struct nfs_write_data *wdata);
 extern void nfs_commit_release(void *wdata);
 #else
 static inline int
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 9fdb8c9d09f2..14fc813ddd0c 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -11,6 +11,7 @@
 
 #include <linux/timer.h>
 #include <linux/sunrpc/types.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
@@ -85,6 +86,7 @@ struct rpc_task {
 	union {
 		struct work_struct	tk_work;	/* Async task work queue */
 		struct rpc_wait		tk_wait;	/* RPC wait */
+		struct rcu_head		tk_rcu;		/* for task deletion */
 	} u;
 
 	unsigned short		tk_timeouts;	/* maj timeouts */
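
An editorial aside on the union above: tk_work, tk_wait and tk_rcu can share
storage because they are live at mutually exclusive points in a task's
lifetime: tk_work while an async task executes, tk_wait while it sleeps on a
wait queue, and tk_rcu only once the task is dead and waiting to be freed.
The RCU callback recovers the enclosing rpc_task with container_of(), as the
sched.c hunks below do:

static void rpc_free_task(struct rcu_head *rcu)
{
	/* rcu points at task->u.tk_rcu; step back to the rpc_task */
	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
	mempool_free(task, rpc_task_mempool);
}

/* Freeing side: queue the callback instead of freeing synchronously. */
call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
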
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 66d01365f3a5..6b808c03fb72 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -427,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task)
  */
 void rpc_wake_up_task(struct rpc_task *task)
 {
+	rcu_read_lock_bh();
 	if (rpc_start_wakeup(task)) {
 		if (RPC_IS_QUEUED(task)) {
 			struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
 
-			spin_lock_bh(&queue->lock);
+			/* Note: we're already in a bh-safe context */
+			spin_lock(&queue->lock);
 			__rpc_do_wake_up_task(task);
-			spin_unlock_bh(&queue->lock);
+			spin_unlock(&queue->lock);
 		}
 		rpc_finish_wakeup(task);
 	}
+	rcu_read_unlock_bh();
 }
 
 /*
@@ -499,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
 	struct rpc_task	*task = NULL;
 
 	dprintk("RPC:      wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	if (RPC_IS_PRIORITY(queue))
 		task = __rpc_wake_up_next_priority(queue);
 	else {
 		task_for_first(task, &queue->tasks[0])
 			__rpc_wake_up_task(task);
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 
 	return task;
 }
@@ -522,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -531,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 /**
@@ -546,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 	struct rpc_task *task, *next;
 	struct list_head *head;
 
-	spin_lock_bh(&queue->lock);
+	rcu_read_lock_bh();
+	spin_lock(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
 		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -557,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 			break;
 		head--;
 	}
-	spin_unlock_bh(&queue->lock);
+	spin_unlock(&queue->lock);
+	rcu_read_unlock_bh();
 }
 
 static void __rpc_atrun(struct rpc_task *task)
@@ -817,8 +826,9 @@ rpc_alloc_task(void)
 	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
 }
 
-static void rpc_free_task(struct rpc_task *task)
+static void rpc_free_task(struct rcu_head *rcu)
 {
+	struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
 	dprintk("RPC: %4d freeing task\n", task->tk_pid);
 	mempool_free(task, rpc_task_mempool);
 }
@@ -872,7 +882,7 @@ void rpc_put_task(struct rpc_task *task)
 		task->tk_client = NULL;
 	}
 	if (task->tk_flags & RPC_TASK_DYNAMIC)
-		rpc_free_task(task);
+		call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
 	if (tk_ops->rpc_release)
 		tk_ops->rpc_release(calldata);
 }
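
A closing editorial note on the locking change in net/sunrpc/sched.c:
rcu_read_lock_bh() disables softirq processing on the local CPU, so the
wake-up paths can drop from spin_lock_bh() to plain spin_lock() without
losing bh-safety; the "we're already in a bh-safe context" comment in
rpc_wake_up_task() records exactly this. A minimal sketch of the pattern:

rcu_read_lock_bh();             /* implies local_bh_disable() */
spin_lock(&queue->lock);        /* bh already off, plain lock suffices */
/* ... wake one or more queued tasks ... */
spin_unlock(&queue->lock);
rcu_read_unlock_bh();           /* implies local_bh_enable() */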