author     Ingo Molnar <mingo@elte.hu>            2006-01-25 15:23:07 +0100
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-01-31 11:30:18 -0800
commit     4021cb279a532728c3208a16b9b09b0ca8016850 (patch)
tree       1103bc655772ea388eb1fb2b259797bc9c703926 /kernel/user.c
parent     d5bee775137c56ed993f1b3c9d66c268b3525d7d (diff)
[PATCH] fix uidhash_lock <-> RCU deadlock
RCU task-struct freeing can call free_uid() from softirq context, where it
takes uidhash_lock, while the other users of uidhash_lock are softirq-unsafe:
a softirq that interrupts a holder of the lock on the same CPU will spin on
it forever.

The fix is to always take the uidhash_lock spinlock in a softirq-safe manner.
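
To make the failure mode concrete, here is a minimal sketch of the deadlock
pattern (our illustration, not part of the patch; demo_lock, process_side()
and softirq_side() are hypothetical stand-ins for uidhash_lock and its users):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* stands in for uidhash_lock */

	/* Process-context user of the lock, e.g. alloc_uid(). */
	static void process_side(void)
	{
		spin_lock(&demo_lock);	/* BUG: softirqs stay enabled */
		/* ... a softirq may fire HERE, on this CPU ... */
		spin_unlock(&demo_lock);
	}

	/* Softirq-context user, e.g. the RCU callback that frees a
	 * task_struct and ends up in free_uid(). */
	static void softirq_side(void)
	{
		spin_lock(&demo_lock);	/* spins forever if it interrupted
					 * process_side() on this CPU: the
					 * lock holder cannot run again until
					 * this softirq returns */
		spin_unlock(&demo_lock);
	}

spin_lock_bh() closes the window by disabling local bottom halves for the
duration of the critical section, which is exactly the conversion the diff
below applies at every process-context call site.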

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index 89e562feb1b1..d1ae2349347e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,12 @@
 
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -83,14 +90,15 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	local_bh_disable();
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_bh_enable();
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_bh(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_bh(&uidhash_lock);
 
 	}
 	return up;
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	return 0;
 }
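
One hunk deserves a note: free_uid() is the only call site that does not
simply switch to spin_lock_bh(). It cannot, because atomic_dec_and_lock()
acquires the raw spinlock internally, so the patch brackets the whole
operation with local_bh_disable()/local_bh_enable() instead. Restated as a
sketch (the body mirrors the patched function, with elided lines kept
elided; the comments are editorial annotation):

	void free_uid(struct user_struct *up)
	{
		local_bh_disable();	/* must cover the lock taken inside
					 * atomic_dec_and_lock() below */
		if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
			/* count hit zero: uidhash_lock now held, BHs off */
			uid_hash_remove(up);
			key_put(up->uid_keyring);
			/* ... remaining teardown, free the slab object ... */
			spin_unlock(&uidhash_lock);	/* plain unlock ... */
		}
		local_bh_enable();	/* ... BHs come back on here, on both
					 * the fast and the slow path */
	}

The fast path (refcount still positive) never takes the lock at all, yet
stays softirq-safe; the cost is only a short BH-off window on every put.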