author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-24 11:04:22 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-24 11:04:22 -0700
commit     5146c8e4dfce5e6c671791b39d5f3c04bbd08715 (patch)
tree       bf5fc33d13781b8732d3df36bdb2e2887d56a35c /drivers
parent     bcee19f424a0d8c26ecf2607b73c690802658b29 (diff)
parent     586b286b110e94eb31840ac5afc0c24e0881fe34 (diff)
Merge tag 'dm-4.3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper fixes from Mike Snitzer:
 "Two stable@ fixes:

   - DM thinp fix to properly advertise discard support as disabled for
     thin devices backed by a thin-pool with discard support disabled.

   - DM crypt fix to prevent the creation of bios that violate the
     underlying block device's max_segments limits.  This fixes a
     relatively long-standing NCQ SSD corruption issue reported against
     dm-crypt ever since the dm-crypt cpu parallelization patches were
     merged back in 4.0"

* tag 'dm-4.3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE
  dm thin: disable discard support for thin devices if pool's is disabled
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/md/dm-crypt.c  17
-rw-r--r--  drivers/md/dm-thin.c    4
2 files changed, 19 insertions, 2 deletions
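
To make the max_segments reasoning concrete, here is a small user-space C model of the arithmetic (this is not kernel code; the 128-segment limit, 64 KiB max_segment_size and 4 KiB page size are illustrative assumptions, not values taken from the patch). It shows why a crypt clone built from freshly allocated, possibly non-contiguous pages can need far more segments than the original bio, and why clamping the crypt device's max_segment_size to PAGE_SIZE keeps the worst case within the underlying device's limit:

/*
 * User-space model of the reasoning behind the dm-crypt change below.
 * The device limits used here are illustrative assumptions only.
 */
#include <stdio.h>

#define PAGE_SIZE            4096UL
#define DEV_MAX_SEGMENTS      128UL   /* assumed underlying device limit */
#define DEV_MAX_SEGMENT_SIZE 65536UL  /* assumed underlying device limit */

/*
 * Worst-case bio size the block layer will build for a queue with these
 * limits: at most max_segments segments, each at most max_segment_size bytes.
 */
static unsigned long max_bio_bytes(unsigned long max_segments,
				   unsigned long max_segment_size)
{
	return max_segments * max_segment_size;
}

/*
 * dm-crypt copies the data into freshly allocated pages, which in the worst
 * case are not physically contiguous, so every page becomes its own segment
 * in the clone submitted to the underlying device.
 */
static unsigned long clone_segments(unsigned long bio_bytes)
{
	return (bio_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	unsigned long before = max_bio_bytes(DEV_MAX_SEGMENTS, DEV_MAX_SEGMENT_SIZE);
	unsigned long after  = max_bio_bytes(DEV_MAX_SEGMENTS, PAGE_SIZE);

	printf("without clamp: bio of %lu bytes -> clone needs %lu segments (limit %lu)\n",
	       before, clone_segments(before), DEV_MAX_SEGMENTS);
	printf("with clamp:    bio of %lu bytes -> clone needs %lu segments (limit %lu)\n",
	       after, clone_segments(after), DEV_MAX_SEGMENTS);
	return 0;
}

With the assumed numbers, the unclamped case yields 2048 segments against a limit of 128, while the clamped case can never exceed one segment per page, i.e. exactly the 128 the device allows.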
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d60c88df5234..4b3b6f8aff0c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
 
 /*
  * Generate a new unfragmented bio with the given size
- * This should never violate the device limitations
+ * This should never violate the device limitations (but only because
+ * max_segment_size is being constrained to PAGE_SIZE).
  *
  * This function may be called concurrently. If we allocate from the mempool
  * concurrently, there is a possibility of deadlock. For example, if we have
@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
 	return fn(ti, cc->dev, cc->start, ti->len, data);
 }
 
+static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	/*
+	 * Unfortunate constraint that is required to avoid the potential
+	 * for exceeding underlying device's max_segments limits -- due to
+	 * crypt_alloc_buffer() possibly allocating pages for the encryption
+	 * bio that are not as physically contiguous as the original bio.
+	 */
+	limits->max_segment_size = PAGE_SIZE;
+}
+
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 14, 0},
+	.version = {1, 14, 1},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
 	.resume = crypt_resume,
 	.message = crypt_message,
 	.iterate_devices = crypt_iterate_devices,
+	.io_hints = crypt_io_hints,
 };
 
 static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6578b7bc1fbb..6fcbfb063366 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct thin_c *tc = ti->private;
 	struct pool *pool = tc->pool;
+	struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
+
+	if (!pool_limits->discard_granularity)
+		return; /* pool's discard support is disabled */
 
 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 	limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
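
For the dm-thin side, the following user-space sketch models the new early return (again not kernel code: the names ending in _model are stand-ins; only the discard_granularity check and the 16G constant come from the diff above). A pool with discard support disabled reports a discard_granularity of 0, so the thin device leaves its own discard limits unset and discard stays disabled end to end:

/* User-space sketch of the thin_io_hints change; names are illustrative. */
#include <stdio.h>

struct limits_model {
	unsigned long discard_granularity;	/* 0 => discard disabled */
	unsigned long max_discard_sectors;
};

static void thin_io_hints_model(const struct limits_model *pool_limits,
				struct limits_model *thin_limits,
				unsigned long pool_block_bytes)
{
	if (!pool_limits->discard_granularity)
		return;	/* pool's discard support is disabled */

	thin_limits->discard_granularity = pool_block_bytes;
	thin_limits->max_discard_sectors = 2048 * 1024 * 16;	/* 16G */
}

int main(void)
{
	struct limits_model pool_off = { 0, 0 };
	struct limits_model pool_on  = { 65536, 2048 * 1024 * 16 };
	struct limits_model thin     = { 0, 0 };

	thin_io_hints_model(&pool_off, &thin, 65536);
	printf("pool discard off -> thin granularity %lu (stays disabled)\n",
	       thin.discard_granularity);

	thin_io_hints_model(&pool_on, &thin, 65536);
	printf("pool discard on  -> thin granularity %lu\n",
	       thin.discard_granularity);
	return 0;
}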