author	Herbert Xu <herbert@gondor.apana.org.au>	2019-05-16 15:19:46 +0800
committer	David S. Miller <davem@davemloft.net>	2019-05-16 09:45:20 -0700
commit	ba6306e3f648a857ae52ddcabc2859542fd2f94c (patch)
tree	8d9582c2d800f1a7cfa3daef5afb67f263ead579 /lib/rhashtable.c
parent	c7d5ec26ea4adf450d9ab2b794e7735761a93af1 (diff)
download	linux-ba6306e3f648a857ae52ddcabc2859542fd2f94c.tar.gz
rhashtable: Remove RCU marking from rhash_lock_head
The opaque type rhash_lock_head should not be marked with __rcu
because it can never be dereferenced.  The RCU marking belongs at
the point where it is turned into a pointer that can be
dereferenced.

This patch does exactly that.  It fixes a number of sparse
warnings and gets rid of some unnecessary RCU checking.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
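
To illustrate the pattern the patch adopts, here is a minimal
userspace sketch (not the kernel's code: bucket_ptr and RHT_LOCK_BIT
are hypothetical stand-ins modeled on the kernel's rht_ptr() helper
and bit-0 bucket lock, with the sparse annotation and RCU accessor
stubbed out so the snippet compiles standalone).  The opaque bucket
word keeps a bit spinlock in its low bit and is therefore never
dereferenced as-is; the accessor that strips the lock bit is the one
place that yields a dereferenceable pointer, so that is where
rcu_dereference() and the __rcu marking belong:

#include <stdint.h>

/* Userspace stubs for the kernel's sparse annotation and RCU
 * accessor; in the kernel these carry real checking semantics. */
#define __rcu
#define rcu_dereference(p) (p)

/* Opaque bucket head: the low bit of the stored value is a bit
 * spinlock, so the value must never be used as a pointer directly. */
struct rhash_lock_head;

struct rhash_head {
	struct rhash_head __rcu *next;
};

#define RHT_LOCK_BIT 1UL	/* hypothetical name for the bit-0 lock */

/* Convert the opaque, possibly-locked bucket word into a real,
 * dereferenceable pointer.  This is the one point where the RCU
 * marking applies, per the patch. */
static inline struct rhash_head *bucket_ptr(struct rhash_lock_head *const *bkt)
{
	uintptr_t raw = (uintptr_t)rcu_dereference(*bkt);

	return (struct rhash_head *)(raw & ~RHT_LOCK_BIT);
}

With this split, the rhash_lock_head pointers passed around need no
__rcu marking of their own, which is exactly what the diff below
removes.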
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6529fe1b45c1..7708699a5b96 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -34,7 +34,7 @@
 
 union nested_table {
 	union nested_table __rcu *table;
-	struct rhash_lock_head __rcu *bucket;
+	struct rhash_lock_head *bucket;
 };
 
 static u32 head_hashfn(struct rhashtable *ht,
@@ -216,7 +216,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 }
 
 static int rhashtable_rehash_one(struct rhashtable *ht,
-				 struct rhash_lock_head __rcu **bkt,
+				 struct rhash_lock_head **bkt,
 				 unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
@@ -269,7 +269,7 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
-	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+	struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash);
 	int err;
 
 	if (!bkt)
@@ -478,7 +478,7 @@ fail:
 }
 
 static void *rhashtable_lookup_one(struct rhashtable *ht,
-				   struct rhash_lock_head __rcu **bkt,
+				   struct rhash_lock_head **bkt,
 				   struct bucket_table *tbl, unsigned int hash,
 				   const void *key, struct rhash_head *obj)
 {
@@ -529,7 +529,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 }
 
 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
-						  struct rhash_lock_head __rcu **bkt,
+						  struct rhash_lock_head **bkt,
 						  struct bucket_table *tbl,
 						  unsigned int hash,
 						  struct rhash_head *obj,
@@ -584,7 +584,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 {
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
-	struct rhash_lock_head __rcu **bkt;
+	struct rhash_lock_head **bkt;
 	unsigned int hash;
 	void *data;
 
@@ -1166,8 +1166,8 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
-						   unsigned int hash)
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+					     unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
@@ -1195,10 +1195,10 @@ struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tb
 }
 EXPORT_SYMBOL_GPL(__rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-						 unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+					   unsigned int hash)
 {
-	static struct rhash_lock_head __rcu *rhnull;
+	static struct rhash_lock_head *rhnull;
 
 	if (!rhnull)
 		INIT_RHT_NULLS_HEAD(rhnull);
@@ -1206,9 +1206,9 @@ struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 }
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
-							struct bucket_table *tbl,
-							unsigned int hash)
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+						  struct bucket_table *tbl,
+						  unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
 	unsigned int index = hash & ((1 << tbl->nest) - 1);