author		Matthew Wilcox <willy@infradead.org>	2018-11-05 13:19:54 -0500
committer	Matthew Wilcox <willy@infradead.org>	2018-12-06 09:25:15 -0500
commit		b7677a132a4c2ff877986c5b30f9427127b9897a (patch)
tree		d93228d64009d3398ebdffe20515c7152db0950d /lib/test_xarray.c
parent		55f3f7eab75c10d9b33d122670b5935ab64db50f (diff)
download	linux-b7677a132a4c2ff877986c5b30f9427127b9897a.tar.gz
XArray tests: Handle larger indices more elegantly
xa_mk_value() only handles values up to LONG_MAX.  I successfully hid
that inside xa_store_index() and xa_erase_index(), but it turned out I
also needed it for testing xa_alloc() on 32-bit machines.  So extract
xa_mk_index() from the above two functions, and convert the non-constant
users of xa_mk_value() to xa_mk_index().

Signed-off-by: Matthew Wilcox <willy@infradead.org>
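
For context, xa_mk_value() encodes an integer as a tagged pointer, which is why it can
only represent values up to LONG_MAX; on a 32-bit machine an index such as the
0xfffffffe used by check_xa_alloc() below does not fit. The following is a minimal
stand-alone sketch (a userspace approximation, not the kernel code itself) of how the
xa_mk_index() helper introduced by this patch masks the index so the tests still build
a valid value entry:

	/*
	 * Stand-alone approximation of the helpers involved; the real
	 * definitions live in include/linux/xarray.h and lib/test_xarray.c.
	 */
	#include <limits.h>
	#include <stdio.h>

	/*
	 * xa_mk_value() stores the integer in a tagged pointer (low bit set),
	 * so it can only hold values up to LONG_MAX.
	 */
	static void *xa_mk_value(unsigned long v)
	{
		return (void *)((v << 1) | 1);
	}

	/*
	 * The helper added by this patch: mask off the top bit so indices
	 * above LONG_MAX (possible on 32-bit) still yield a valid entry.
	 */
	static void *xa_mk_index(unsigned long index)
	{
		return xa_mk_value(index & LONG_MAX);
	}

	int main(void)
	{
		/* id used in check_xa_alloc(); exceeds LONG_MAX on 32-bit */
		unsigned long index = 0xfffffffeUL;

		printf("entry for index %#lx: %p\n", index, xa_mk_index(index));
		return 0;
	}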
Diffstat (limited to 'lib/test_xarray.c')
-rw-r--r--	lib/test_xarray.c	91
1 file changed, 48 insertions(+), 43 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0598e86af8fc..d30fd907a3dd 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
 } while (0)
 #endif
 
+static void *xa_mk_index(unsigned long index)
+{
+	return xa_mk_value(index & LONG_MAX);
+}
+
 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+	return xa_store(xa, index, xa_mk_index(index), gfp);
 }
 
 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	u32 id = 0;
 
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
 				gfp) != 0);
 	XA_BUG_ON(xa, id != index);
 }
 
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
-	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 }
 
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
 
 	xas_set(&xas, 0);
 	xas_for_each(&xas, entry, ULONG_MAX) {
-		xas_store(&xas, xa_mk_value(xas.xa_index));
+		xas_store(&xas, xa_mk_index(xas.xa_index));
 	}
 	xas_unlock(&xas);
 
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
 		xa_set_mark(xa, index + 2, XA_MARK_1);
 		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
-		xa_store_order(xa, index, order, xa_mk_value(index),
+		xa_store_order(xa, index, order, xa_mk_index(index),
 				GFP_KERNEL);
 		for (i = base; i < next; i++) {
 			XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
 			xas_set(&xas, j);
 			do {
 				xas_lock(&xas);
-				xas_store(&xas, xa_mk_value(j));
+				xas_store(&xas, xa_mk_index(j));
 				xas_unlock(&xas);
 			} while (xas_nomem(&xas, GFP_KERNEL));
 		}
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
 		xas_set(&xas, 0);
 		j = i;
 		xas_for_each(&xas, entry, ULONG_MAX) {
-			XA_BUG_ON(xa, entry != xa_mk_value(j));
+			XA_BUG_ON(xa, entry != xa_mk_index(j));
 			xas_store(&xas, NULL);
 			j++;
 		}
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
 	unsigned long min = index & ~((1UL << order) - 1);
 	unsigned long max = min + (1UL << order);
 
-	xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
-	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
-	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
 	xas_lock(&xas);
-	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
 	xas_unlock(&xas);
-	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
-	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
@@ -523,15 +528,15 @@ static noinline void check_multi_store(struct xarray *xa)
 
 	for (i = 0; i < max_order; i++) {
 		for (j = 0; j < max_order; j++) {
-			xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
-			xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+			xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
+			xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
 
 			for (k = 0; k < max_order; k++) {
 				void *entry = xa_load(xa, (1UL << k) - 1);
 				if ((i < k) && (j < k))
 					XA_BUG_ON(xa, entry != NULL);
 				else
-					XA_BUG_ON(xa, entry != xa_mk_value(j));
+					XA_BUG_ON(xa, entry != xa_mk_index(j));
 			}
 
 			xa_erase(xa, 0);
@@ -587,13 +592,13 @@ static noinline void check_xa_alloc(void)
 	xa_destroy(&xa0);
 
 	id = 0xfffffffeU;
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
 	XA_BUG_ON(&xa0, id != 0xfffffffeU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
 	XA_BUG_ON(&xa0, id != 0xffffffffU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
 	XA_BUG_ON(&xa0, id != 0xffffffffU);
 	xa_destroy(&xa0);
@@ -610,11 +615,11 @@ retry:
 	xas_lock(&xas);
 	xas_for_each_conflict(&xas, entry) {
 		XA_BUG_ON(xa, !xa_is_value(entry));
-		XA_BUG_ON(xa, entry < xa_mk_value(start));
-		XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+		XA_BUG_ON(xa, entry < xa_mk_index(start));
+		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
 		count++;
 	}
-	xas_store(&xas, xa_mk_value(start));
+	xas_store(&xas, xa_mk_index(start));
 	xas_unlock(&xas);
 	if (xas_nomem(&xas, GFP_KERNEL)) {
 		count = 0;
@@ -622,9 +627,9 @@ retry:
 	}
 	XA_BUG_ON(xa, xas_error(&xas));
 	XA_BUG_ON(xa, count != present);
-	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
 	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
-			xa_mk_value(start));
+			xa_mk_index(start));
 	xa_erase_index(xa, start);
 }
 
@@ -703,7 +708,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
 		for (j = 0; j < index; j++) {
 			XA_STATE(xas, xa, j + index);
 			xa_store_index(xa, index - 1, GFP_KERNEL);
-			xa_store_order(xa, index, i, xa_mk_value(index),
+			xa_store_order(xa, index, i, xa_mk_index(index),
 					GFP_KERNEL);
 			rcu_read_lock();
 			xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +783,7 @@ static noinline void check_find_2(struct xarray *xa)
 		j = 0;
 		index = 0;
 		xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
-			XA_BUG_ON(xa, xa_mk_value(index) != entry);
+			XA_BUG_ON(xa, xa_mk_index(index) != entry);
 			XA_BUG_ON(xa, index != j++);
 		}
 	}
@@ -829,11 +834,11 @@ static noinline void check_find_entry(struct xarray *xa)
 			for (index = 0; index < (1UL << (order + 5));
 			     index += (1UL << order)) {
 				xa_store_order(xa, index, order,
-						xa_mk_value(index), GFP_KERNEL);
+						xa_mk_index(index), GFP_KERNEL);
 				XA_BUG_ON(xa, xa_load(xa, index) !=
-						xa_mk_value(index));
+						xa_mk_index(index));
 				XA_BUG_ON(xa, xa_find_entry(xa,
-						xa_mk_value(index)) != index);
+						xa_mk_index(index)) != index);
 			}
 			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
 			xa_destroy(xa);
@@ -844,7 +849,7 @@ static noinline void check_find_entry(struct xarray *xa)
 	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
 	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
 	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
-	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
 	xa_erase_index(xa, ULONG_MAX);
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
@@ -864,7 +869,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
 		XA_BUG_ON(xa, xas.xa_index != i);
 		if (i == 0 || i == idx)
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 	}
@@ -878,7 +883,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
 			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
 		XA_BUG_ON(xa, xas.xa_index != i);
 		if (i == 0 || i == idx)
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 	} while (i > 0);
@@ -909,7 +914,7 @@ static noinline void check_move(struct xarray *xa)
 	do {
 		void *entry = xas_prev(&xas);
 		i--;
-		XA_BUG_ON(xa, entry != xa_mk_value(i));
+		XA_BUG_ON(xa, entry != xa_mk_index(i));
 		XA_BUG_ON(xa, i != xas.xa_index);
 	} while (i != 0);
 
@@ -918,7 +923,7 @@ static noinline void check_move(struct xarray *xa)
 
 	do {
 		void *entry = xas_next(&xas);
-		XA_BUG_ON(xa, entry != xa_mk_value(i));
+		XA_BUG_ON(xa, entry != xa_mk_index(i));
 		XA_BUG_ON(xa, i != xas.xa_index);
 		i++;
 	} while (i < (1 << 16));
@@ -934,7 +939,7 @@ static noinline void check_move(struct xarray *xa)
 		void *entry = xas_prev(&xas);
 		i--;
 		if ((i < (1 << 8)) || (i >= (1 << 15)))
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 		XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +951,7 @@ static noinline void check_move(struct xarray *xa)
 	do {
 		void *entry = xas_next(&xas);
 		if ((i < (1 << 8)) || (i >= (1 << 15)))
-			XA_BUG_ON(xa, entry != xa_mk_value(i));
+			XA_BUG_ON(xa, entry != xa_mk_index(i));
 		else
 			XA_BUG_ON(xa, entry != NULL);
 		XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +981,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
 		if (xas_error(&xas))
 			goto unlock;
 		for (i = 0; i < (1U << order); i++) {
-			XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
 			xas_next(&xas);
 		}
 unlock:
@@ -1031,9 +1036,9 @@ static noinline void check_create_range_4(struct xarray *xa,
 		if (xas_error(&xas))
 			goto unlock;
 		for (i = 0; i < (1UL << order); i++) {
-			void *old = xas_store(&xas, xa_mk_value(base + i));
+			void *old = xas_store(&xas, xa_mk_index(base + i));
 			if (xas.xa_index == index)
-				XA_BUG_ON(xa, old != xa_mk_value(base + i));
+				XA_BUG_ON(xa, old != xa_mk_index(base + i));
 			else
 				XA_BUG_ON(xa, old != NULL);
 			xas_next(&xas);
@@ -1085,10 +1090,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
 		unsigned long last)
 {
 #ifdef CONFIG_XARRAY_MULTI
-	xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+	xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
 
-	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
-	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+	XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
+	XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
 	XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
 
@@ -1195,7 +1200,7 @@ static noinline void check_account(struct xarray *xa)
 		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
 		rcu_read_unlock();
 
-		xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
 				GFP_KERNEL);
 		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);