author     Jens Axboe <axboe@fb.com>  2016-11-28 09:22:47 -0700
committer  Jens Axboe <axboe@fb.com>  2016-11-28 10:27:03 -0700
commit     80e091d10e8bf7b801d634ea8870b9e907314424 (patch)
tree       05d99a001b158de7e3f6f9eb119f28bb226b8bc2
parent     feffa5cc7b47f38210d4997ceb3fe30881d6c337 (diff)
download   linux-80e091d10e8bf7b801d634ea8870b9e907314424.tar.gz
blk-wbt: allow reset of default latency through sysfs
Allow a write of '-1' to reset the latency target for a given device
to its default. This means user space no longer needs to know the
different default settings for rotational vs non-rotational devices.

Signed-off-by: Jens Axboe <axboe@fb.com>
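
For illustration, a minimal user-space sketch of the new interface follows.
It assumes a hypothetical device named "sda" on a kernel built with
CONFIG_BLK_WBT; the device name and path are examples, not part of this patch.

/*
 * Reset the writeback throttling latency target to its default by
 * writing "-1", as described above. Writing "0" would instead disable
 * the feature entirely.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/wb_lat_usec";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "-1", strlen("-1")) < 0)
		perror("write");
	close(fd);
	return 0;
}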
-rw-r--r--  Documentation/block/queue-sysfs.txt   4
-rw-r--r--  block/blk-sysfs.c                     22
-rw-r--r--  block/blk-wbt.c                       17
-rw-r--r--  block/blk-wbt.h                        6
4 files changed, 37 insertions, 12 deletions
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 14235e72a702..51642159aedb 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -188,7 +188,9 @@ wb_lat_usec (RW)
 If the device is registered for writeback throttling, then this file shows
 the target minimum read latency. If this latency is exceeded in a given
 window of time (see wb_window_usec), then the writeback throttling will start
-scaling back writes.
+scaling back writes. Writing a value of '0' to this file disables the
+feature. Writing a value of '-1' to this file resets the value to the
+default setting.
 
 
 Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1855c6770045..f0ca569e276b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -42,12 +42,12 @@ queue_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t queue_var_store64(u64 *var, const char *page)
+static ssize_t queue_var_store64(s64 *var, const char *page)
 {
 	int err;
-	u64 v;
+	s64 v;
 
-	err = kstrtou64(page, 10, &v);
+	err = kstrtos64(page, 10, &v);
 	if (err < 0)
 		return err;
 
@@ -421,18 +421,26 @@ static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
 static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 				  size_t count)
 {
+	struct rq_wb *rwb;
 	ssize_t ret;
-	u64 val;
+	s64 val;
 
-	if (!q->rq_wb)
+	rwb = q->rq_wb;
+	if (!rwb)
 		return -EINVAL;
 
 	ret = queue_var_store64(&val, page);
 	if (ret < 0)
 		return ret;
 
-	q->rq_wb->min_lat_nsec = val * 1000ULL;
-	wbt_update_limits(q->rq_wb);
+	if (val == -1)
+		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
+	else if (val >= 0)
+		rwb->min_lat_nsec = val * 1000ULL;
+	else
+		return -EINVAL;
+
+	wbt_update_limits(rwb);
 	return count;
 }
 
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 9f97594e68ce..92df2f7c5af1 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -675,6 +675,18 @@ void wbt_disable(struct rq_wb *rwb)
 }
 EXPORT_SYMBOL_GPL(wbt_disable);
 
+u64 wbt_default_latency_nsec(struct request_queue *q)
+{
+	/*
+	 * We default to 2msec for non-rotational storage, and 75msec
+	 * for rotational storage.
+	 */
+	if (blk_queue_nonrot(q))
+		return 2000000ULL;
+	else
+		return 75000000ULL;
+}
+
 int wbt_init(struct request_queue *q)
 {
 	struct rq_wb *rwb;
@@ -711,10 +723,7 @@ int wbt_init(struct request_queue *q)
 	q->rq_wb = rwb;
 	blk_stat_enable(q);
 
-	if (blk_queue_nonrot(q))
-		rwb->min_lat_nsec = 2000000ULL;
-	else
-		rwb->min_lat_nsec = 75000000ULL;
+	rwb->min_lat_nsec = wbt_default_latency_nsec(q);
 
 	wbt_set_queue_depth(rwb, blk_queue_depth(q));
 	wbt_set_write_cache(rwb, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 44dc2173dc1f..9dfc88ad7f30 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -110,6 +110,8 @@ void wbt_disable(struct rq_wb *);
 void wbt_set_queue_depth(struct rq_wb *, unsigned int);
 void wbt_set_write_cache(struct rq_wb *, bool);
 
+u64 wbt_default_latency_nsec(struct request_queue *);
+
 #else
 
 static inline void __wbt_done(struct rq_wb *rwb, enum wbt_flags flags)
@@ -148,6 +150,10 @@ static inline void wbt_set_queue_depth(struct rq_wb *rwb, unsigned int depth)
 static inline void wbt_set_write_cache(struct rq_wb *rwb, bool wc)
 {
 }
+static inline u64 wbt_default_latency_nsec(struct request_queue *q)
+{
+	return 0;
+}
 
 #endif /* CONFIG_BLK_WBT */
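
As a complementary sketch of the defaults chosen by wbt_default_latency_nsec()
above (2 msec for non-rotational storage, 75 msec for rotational), the snippet
below reads back the queue attributes after a '-1' reset. The device name "sda"
is again an assumption; wb_lat_usec reports the target in microseconds, so the
expected read-back is 2000 or 75000.

#include <stdio.h>

/* Read a single integer value from a sysfs attribute, -1 on failure. */
static long read_long(const char *path)
{
	long v = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long rot = read_long("/sys/block/sda/queue/rotational");
	long lat = read_long("/sys/block/sda/queue/wb_lat_usec");

	/* Non-rotational queues default to 2000 usec, rotational to 75000. */
	printf("rotational=%ld wb_lat_usec=%ld (expected %s)\n",
	       rot, lat, rot ? "75000" : "2000");
	return 0;
}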