Diffstat (limited to 'fs/xfs/xfs_mru_cache.c')
 fs/xfs/xfs_mru_cache.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 012209eda35d..a0b2c0a2589a 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -225,10 +225,14 @@ _xfs_mru_cache_list_insert(
  * list need to be deleted.  For each element this involves removing it from the
  * data store, removing it from the reap list, calling the client's free
  * function and deleting the element from the element zone.
+ *
+ * We get called holding the mru->lock, which we drop and then reacquire.
+ * Sparse needs special help with this to tell it we know what we are doing.
  */
 STATIC void
 _xfs_mru_cache_clear_reap_list(
-	xfs_mru_cache_t		*mru)
+	xfs_mru_cache_t		*mru) __releases(mru->lock) __acquires(mru->lock)
+
 {
 	xfs_mru_cache_elem_t	*elem, *next;
 	struct list_head	tmp;
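
To make the annotations above concrete, here is a minimal, self-contained
sketch of the drop-and-reacquire pattern.  The macro definitions mirror
those in include/linux/compiler.h; everything else (demo_lock,
do_slow_work(), and the use of a pthread mutex in place of the kernel
spinlock) is invented for illustration, not the XFS code itself.

#include <pthread.h>

/*
 * Sparse helpers, as in include/linux/compiler.h.  Under a sparse run
 * (__CHECKER__ defined) they become context annotations; under a normal
 * compile they vanish, which is also why they can sit after the
 * parameter list of a function definition without upsetting gcc.
 */
#ifdef __CHECKER__
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __release(x)	__context__(x, -1)
# define __acquire(x)	__context__(x, 1)
#else
# define __releases(x)
# define __acquires(x)
# define __release(x)	(void)0
# define __acquire(x)	(void)0
#endif

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Called with demo_lock held; drops it around the slow work and takes it
 * back before returning -- the same shape as
 * _xfs_mru_cache_clear_reap_list().  The kernel's spin_lock()/spin_unlock()
 * carry their own annotations, so the bare __release()/__acquire()
 * statements here stand in for that.
 */
static void do_slow_work(void) __releases(demo_lock) __acquires(demo_lock)
{
	pthread_mutex_unlock(&demo_lock);
	__release(demo_lock);
	/* ... long-running work done without the lock held ... */
	pthread_mutex_lock(&demo_lock);
	__acquire(demo_lock);
}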
@@ -528,6 +532,10 @@ xfs_mru_cache_delete(
  *
  * If the element isn't found, this function returns NULL and the spinlock is
  * released.  xfs_mru_cache_done() should NOT be called when this occurs.
+ *
+ * Because sparse isn't smart enough to understand that the lock is only
+ * conditionally released here, we need to help it along by annotating the
+ * path that does not release the lock.
  */
 void *
 xfs_mru_cache_lookup(
@@ -545,8 +553,8 @@ xfs_mru_cache_lookup(
 	if (elem) {
 		list_del(&elem->list_node);
 		_xfs_mru_cache_list_insert(mru, elem);
-	}
-	else
+		__release(mru_lock); /* help sparse not be stupid */
+	} else
 		spin_unlock(&mru->lock);
 
 	return elem ? elem->value : NULL;
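
The lookup hunk above is the conditional case the comment describes: on a
hit the function returns with the lock still held, so the caller can work
on the element and drop the lock via xfs_mru_cache_done(); on a miss it
unlocks before returning.  The bare __release() keeps sparse's context
counter balanced on the path where no real unlock happens.  A sketch of
the same shape, reusing the hypothetical demo_lock and macros from the
sketch above; find_element() and demo_lookup() are likewise invented:

static void *find_element(int key)
{
	static int stored_value = 42;

	/* Toy stand-in for radix_tree_lookup() on mru->store. */
	return key == 0 ? &stored_value : NULL;
}

/*
 * On success, returns with demo_lock held -- the caller must drop it via
 * demo_done() (see the sketch further below).  On failure the lock is
 * dropped here and the done routine must NOT be called.
 */
static void *demo_lookup(int key)
{
	void *elem;

	pthread_mutex_lock(&demo_lock);
	__acquire(demo_lock);

	elem = find_element(key);
	if (elem) {
		/* Lock deliberately stays held; fake a release for sparse. */
		__release(demo_lock);
	} else {
		pthread_mutex_unlock(&demo_lock);
		__release(demo_lock);
	}
	return elem;
}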
@@ -575,6 +583,8 @@ xfs_mru_cache_peek(
 	elem = radix_tree_lookup(&mru->store, key);
 	if (!elem)
 		spin_unlock(&mru->lock);
+	else
+		__release(mru_lock); /* help sparse not be stupid */
 
 	return elem ? elem->value : NULL;
 }
@@ -586,7 +596,7 @@ xfs_mru_cache_peek(
  */
 void
 xfs_mru_cache_done(
-	xfs_mru_cache_t	*mru)
+	xfs_mru_cache_t	*mru) __releases(mru->lock)
 {
 	spin_unlock(&mru->lock);
 }
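
xfs_mru_cache_done() is the other half of that contract: it exists solely
to drop the lock a successful lookup left held, so a plain __releases()
annotation suffices.  Completing the hypothetical sketch with its
counterpart and a caller showing how the pair is intended to be used
(demo_done() and main() are, again, invented for illustration):

/*
 * Mirrors xfs_mru_cache_done(): the only way the lock taken by a
 * successful demo_lookup() gets dropped.  The kernel's spin_unlock() is
 * already annotated; the bare __release() stands in for that here.
 */
static void demo_done(void) __releases(demo_lock)
{
	pthread_mutex_unlock(&demo_lock);
	__release(demo_lock);
}

int main(void)
{
	void *val = demo_lookup(0);

	if (val)
		demo_done();	/* hit: we still held demo_lock */
	/*
	 * On a miss, demo_lookup() already unlocked; calling demo_done()
	 * here would be a double unlock.
	 */
	return 0;
}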