summary refs log tree commit diff
path: root/fs/btrfs/async-thread.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-09-15 20:02:33 -0400
committerChris Mason <chris.mason@oracle.com>2009-09-15 20:20:17 -0400
commit6e74057c4686dc12ea767b4bdc50a63876056e1c (patch)
treee7c70b8e08ab9e5363be28bcbcc72348122ae6e4 /fs/btrfs/async-thread.c
parent627e421a3f35ad6b52dc58982fb6f8a97c30dcd7 (diff)
downloadlinux-6e74057c4686dc12ea767b4bdc50a63876056e1c.tar.gz
Btrfs: Fix async thread shutdown race
It was possible for an async worker thread to be selected to
receive a new work item, but exit before the work item was
actually placed into that thread's work list.

This commit fixes the race by incrementing the num_pending
counter earlier, and making sure to check the number of pending
work items before a thread exits.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--fs/btrfs/async-thread.c16
1 file changed, 10 insertions, 6 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 80e33bc96c84..282ca085c2fb 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -191,7 +191,8 @@ static int try_worker_shutdown(struct btrfs_worker_thread *worker)
 	    !worker->working &&
 	    !list_empty(&worker->worker_list) &&
 	    list_empty(&worker->prio_pending) &&
-	    list_empty(&worker->pending)) {
+	    list_empty(&worker->pending) &&
+	    atomic_read(&worker->num_pending) == 0) {
 		freeit = 1;
 		list_del_init(&worker->worker_list);
 		worker->workers->num_workers--;
@@ -485,7 +486,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	atomic_inc(&worker->num_pending);
 	worker->sequence++;
 
 	if (worker->sequence % workers->idle_thresh == 0)
@@ -521,8 +521,7 @@ again:
 			goto again;
 		}
 	}
-	spin_unlock_irqrestore(&workers->lock, flags);
-	return worker;
+	goto found;
 
 fallback:
 	fallback = NULL;
@@ -537,6 +536,12 @@ fallback:
 	BUG_ON(!fallback);
 	worker = list_entry(fallback,
 		  struct btrfs_worker_thread, worker_list);
+found:
+	/*
+	 * this makes sure the worker doesn't exit before it is placed
+	 * onto a busy/idle list
+	 */
+	atomic_inc(&worker->num_pending);
 	spin_unlock_irqrestore(&workers->lock, flags);
 	return worker;
 }
@@ -569,7 +574,7 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		spin_lock(&worker->workers->lock);
 		worker->idle = 0;
 		list_move_tail(&worker->worker_list,
-			       &worker->workers->worker_list);
+			      &worker->workers->worker_list);
 		spin_unlock(&worker->workers->lock);
 	}
 	if (!worker->working) {
@@ -627,7 +632,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		list_add_tail(&work->list, &worker->prio_pending);
 	else
 		list_add_tail(&work->list, &worker->pending);
-	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
 
 	/*