author     Linus Torvalds <torvalds@linux-foundation.org>  2021-12-11 09:19:44 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-12-11 09:19:44 -0800
commit     f152165ada75e1efc7bffbea8a188652bcd04f32 (patch)
tree       06c8f41efb0ae2f51f5219420cdaba8253c4ec8a /fs
parent     bd66be54b92e66d6e27af4b57407052edbed7178 (diff)
parent     71a85387546e50b1a37b0fa45dadcae3bfb35cf6 (diff)
download   linux-f152165ada75e1efc7bffbea8a188652bcd04f32.tar.gz
Merge tag 'io_uring-5.16-2021-12-10' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:
 "A few fixes that are all bound for stable:

   - Two syzbot reports for io-wq that turned out to be separate fixes,
     but ultimately very closely related

   - io_uring task_work running on cancelations"

* tag 'io_uring-5.16-2021-12-10' of git://git.kernel.dk/linux-block:
  io-wq: check for wq exit after adding new worker task_work
  io_uring: ensure task_work gets run as part of cancelations
  io-wq: remove spurious bit clear on task_work addition
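
The first and third shortlog items above both touch io_queue_worker_create() in fs/io-wq.c: once a worker-creation item has been handed to task_work_add(), the wq exit path may already have set IO_WQ_BIT_EXIT and drained its pending creation items, so the submitter now re-checks the bit and, if it is set, drains again itself via the new io_wq_cancel_tw_create() helper (the clear_bit_unlock() dropped from the success path is the third item: once the item is queued, clearing create_state is left to the callback side). What follows is a minimal, compilable userspace sketch of that re-check-after-publish pattern, not the kernel code; wq_exiting, add_pending() and drain_pending() are illustrative stand-ins, not kernel APIs.

/*
 * Minimal userspace sketch of the re-check-after-publish pattern used by
 * the io-wq fix. wq_exiting stands in for IO_WQ_BIT_EXIT, add_pending()
 * for task_work_add(), and drain_pending() for io_wq_cancel_tw_create();
 * none of these are kernel APIs.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static atomic_bool wq_exiting;          /* set once by the exit path */
static _Atomic(void *) pending_item;    /* single-slot stand-in for the task_work list */

/* Publish an item for the wq task to run later (stand-in for task_work_add()). */
static bool add_pending(void *item)
{
	void *expected = NULL;

	return atomic_compare_exchange_strong(&pending_item, &expected, item);
}

/* Cancel anything still queued (stand-in for io_wq_cancel_tw_create()). */
static void drain_pending(void)
{
	atomic_store(&pending_item, NULL);
}

static bool queue_worker_create(void *item)
{
	if (atomic_load(&wq_exiting))       /* the pre-existing early check */
		return false;

	if (!add_pending(item))
		return false;

	/*
	 * The exit path may have set wq_exiting and already run its drain
	 * between the check above and add_pending(); our item would then
	 * never be processed, so drain it here ourselves, exactly as the
	 * patch calls io_wq_cancel_tw_create() after task_work_add().
	 */
	if (atomic_load(&wq_exiting))
		drain_pending();
	return true;
}

int main(void)
{
	int item;

	/*
	 * Single-threaded smoke test of the normal path; reproducing the
	 * actual race needs a second thread setting wq_exiting between the
	 * early check and add_pending().
	 */
	return queue_worker_create(&item) ? 0 : 1;
}
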
Diffstat (limited to 'fs')
-rw-r--r--  fs/io-wq.c    | 29
-rw-r--r--  fs/io_uring.c |  6
2 files changed, 27 insertions(+), 8 deletions(-)
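
The fs/io_uring.c hunk further down covers the second item: the cancelation loop in io_uring_cancel_generic() now sleeps TASK_INTERRUPTIBLE (so task_work queued with TWA_SIGNAL can wake the task) and calls io_run_task_work() before dropping tctx references, because the task's own pending task_work may hold the very references the loop is waiting on. Below is a small, self-contained sketch of that failure mode and the fix; inflight_refs, pending_work and run_pending() are hypothetical names, and the kernel's prepare_to_wait()/schedule() sleep is elided.

/*
 * Sketch (not kernel code) of why the cancelation loop must run its own
 * deferred work before waiting: the only thing that would drop the last
 * references is work queued on the waiting task itself.
 */
#include <stdatomic.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
	void (*fn)(void);               /* each item drops a reference when run */
};

static atomic_int inflight_refs;
static struct work_item *pending_work;  /* task-local in this sketch, so unlocked */

/* Run and clear everything queued (stand-in for io_run_task_work()). */
static void run_pending(void)
{
	while (pending_work) {
		struct work_item *item = pending_work;

		pending_work = item->next;
		item->fn();
	}
}

static void cancel_wait_loop(void)
{
	while (atomic_load(&inflight_refs)) {
		/*
		 * As in the patch, run deferred work before sleeping;
		 * without this the references it would drop never go away
		 * and the wait would never end. The kernel then sleeps
		 * interruptibly via prepare_to_wait()/schedule(), elided
		 * here.
		 */
		run_pending();
	}
}

static void drop_ref(void)
{
	atomic_fetch_sub(&inflight_refs, 1);
}

int main(void)
{
	static struct work_item item = { .next = NULL, .fn = drop_ref };

	/* One reference that only our own deferred work will drop. */
	atomic_store(&inflight_refs, 1);
	pending_work = &item;

	cancel_wait_loop();
	return atomic_load(&inflight_refs);     /* 0 on success */
}
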
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 50cf9f92da36..8d2bb818a3bb 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -142,6 +142,7 @@ static bool io_acct_cancel_pending_work(struct io_wqe *wqe,
 					struct io_wqe_acct *acct,
 					struct io_cb_cancel_data *match);
 static void create_worker_cb(struct callback_head *cb);
+static void io_wq_cancel_tw_create(struct io_wq *wq);
 
 static bool io_worker_get(struct io_worker *worker)
 {
@@ -357,12 +358,22 @@ static bool io_queue_worker_create(struct io_worker *worker,
 	    test_and_set_bit_lock(0, &worker->create_state))
 		goto fail_release;
 
+	atomic_inc(&wq->worker_refs);
 	init_task_work(&worker->create_work, func);
 	worker->create_index = acct->index;
 	if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL)) {
-		clear_bit_unlock(0, &worker->create_state);
+		/*
+		 * EXIT may have been set after checking it above, check after
+		 * adding the task_work and remove any creation item if it is
+		 * now set. wq exit does that too, but we can have added this
+		 * work item after we canceled in io_wq_exit_workers().
+		 */
+		if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
+			io_wq_cancel_tw_create(wq);
+		io_worker_ref_put(wq);
 		return true;
 	}
+	io_worker_ref_put(wq);
 	clear_bit_unlock(0, &worker->create_state);
 fail_release:
 	io_worker_release(worker);
@@ -1198,13 +1209,9 @@ void io_wq_exit_start(struct io_wq *wq)
 	set_bit(IO_WQ_BIT_EXIT, &wq->state);
 }
 
-static void io_wq_exit_workers(struct io_wq *wq)
+static void io_wq_cancel_tw_create(struct io_wq *wq)
 {
 	struct callback_head *cb;
-	int node;
-
-	if (!wq->task)
-		return;
 
 	while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
 		struct io_worker *worker;
@@ -1212,6 +1219,16 @@ static void io_wq_exit_workers(struct io_wq *wq)
 		worker = container_of(cb, struct io_worker, create_work);
 		io_worker_cancel_cb(worker);
 	}
+}
+
+static void io_wq_exit_workers(struct io_wq *wq)
+{
+	int node;
+
+	if (!wq->task)
+		return;
+
+	io_wq_cancel_tw_create(wq);
 
 	rcu_read_lock();
 	for_each_node(node) {
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c4f217613f56..d5ab0e9a3f29 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -9824,7 +9824,7 @@ static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 
 /*
  * Find any io_uring ctx that this task has registered or done IO on, and cancel
- * requests. @sqd should be not-null IIF it's an SQPOLL thread cancellation.
+ * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
  */
 static __cold void io_uring_cancel_generic(bool cancel_all,
 					   struct io_sq_data *sqd)
@@ -9866,8 +9866,10 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
 							     cancel_all);
 		}
 
-		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
+		io_run_task_work();
 		io_uring_drop_tctx_refs(current);
+
 		/*
 		 * If we've seen completions, retry without waiting. This
 		 * avoids a race where a completion comes in before we did