author    Linus Torvalds <torvalds@linux-foundation.org>  2018-08-18 15:55:59 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-08-18 15:55:59 -0700
commit    13bf2cf9e2d1e0e56088ec6342c2726704100647 (patch)
tree      b75f76b2376244e64471dd5c6867aaaf3cb0298c /crypto
parent    bbd60bffaf780464298cb7a39852f7f1065f1726 (diff)
parent    3257d86182cc27eda83d6854787256641f7c574b (diff)
download  linux-13bf2cf9e2d1e0e56088ec6342c2726704100647.tar.gz
Merge tag 'dmaengine-4.19-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull DMAengine updates from Vinod Koul:
 "This round brings couple of framework changes, a new driver and usual
  driver updates:

   - new managed helper for dmaengine framework registration (see the
     sketch after this list)

   - split the dmaengine pause capability into pause and resume and allow
     drivers to report these individually

   - update dma_request_chan_by_mask() to handle deferred probing

   - move imx-sdma to use virt-dma

   - new driver for Actions Semi Owl family S900 controller

   - minor updates to intel, renesas, mv_xor, pl330 etc"

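The "new managed helper" item above refers to dmaenginem_async_device_register(),
a devres-managed counterpart to dma_async_device_register() (see the
"add a new helper dmaenginem_async_device_register" commit in the list
below). The probe sketch that follows is illustrative only: the "foo"
driver, its state structure and its fields are hypothetical and not taken
from this tree; it only assumes the helper's int-returning signature.

#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical driver state; only the embedded dma_device matters here. */
struct foo_dma {
	struct dma_device ddev;
};

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma *fd;
	int ret;

	fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	fd->ddev.dev = &pdev->dev;
	/* ... set capabilities, channel lists and callbacks here ... */

	/*
	 * Managed registration: the DMA device is unregistered
	 * automatically on driver detach, so no explicit
	 * dma_async_device_unregister() is needed in the error or
	 * remove paths.
	 */
	ret = dmaenginem_async_device_register(&fd->ddev);
	if (ret)
		dev_err(&pdev->dev, "DMA device registration failed: %d\n", ret);

	return ret;
}

The mic_x100_dma commit in the shortlog below converts an in-tree driver
to this pattern, dropping its explicit unregister call.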
* tag 'dmaengine-4.19-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits)
  dmaengine: Add Actions Semi Owl family S900 DMA driver
  dt-bindings: dmaengine: Add binding for Actions Semi Owl SoCs
  dmaengine: sh: rcar-dmac: Should not stop the DMAC by rcar_dmac_sync_tcr()
  dmaengine: mic_x100_dma: use the new helper to simplify the code
  dmaengine: add a new helper dmaenginem_async_device_register
  dmaengine: imx-sdma: add memcpy interface
  dmaengine: imx-sdma: add SDMA_BD_MAX_CNT to replace '0xffff'
  dmaengine: dma_request_chan_by_mask() to handle deferred probing
  dmaengine: pl330: fix irq race with terminate_all
  dmaengine: Revert "dmaengine: mv_xor_v2: enable COMPILE_TEST"
  dmaengine: mv_xor_v2: use {lower,upper}_32_bits to configure HW descriptor address
  dmaengine: mv_xor_v2: enable COMPILE_TEST
  dmaengine: mv_xor_v2: move unmap to before callback
  dmaengine: mv_xor_v2: convert callback to helper function
  dmaengine: mv_xor_v2: kill the tasklets upon exit
  dmaengine: mv_xor_v2: explicitly freeup irq
  dmaengine: sh: rcar-dmac: Add dma_pause operation
  dmaengine: sh: rcar-dmac: add a new function to clear CHCR.DE with barrier
  dmaengine: idma64: Support dmaengine_terminate_sync()
  dmaengine: hsu: Support dmaengine_terminate_sync()
  ...
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/async_pq.c  | 10
-rw-r--r--  crypto/async_tx/raid6test.c |  4
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..80dc567801ec 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -42,6 +42,8 @@ static struct page *pq_scribble_page;
 #define P(b, d) (b[d-2])
 #define Q(b, d) (b[d-1])
 
+#define MAX_DISKS 255
+
 /**
  * do_async_gen_syndrome - asynchronously calculate P and/or Q
  */
@@ -184,7 +186,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
 		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
@@ -196,7 +198,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
 		enum dma_ctrl_flags dma_flags = 0;
-		unsigned char coefs[src_cnt];
+		unsigned char coefs[MAX_DISKS];
 		int i, j;
 
 		/* run the p+q asynchronously */
@@ -299,11 +301,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
-	unsigned char coefs[disks-2];
+	unsigned char coefs[MAX_DISKS];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks < 4);
+	BUG_ON(disks < 4 || disks > MAX_DISKS);
 
 	if (device)
 		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..a5edaabae12a 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,13 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
 			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
 		} else {
-			struct page *blocks[disks];
+			struct page *blocks[NDISKS];
 			struct page *dest;
 			int count = 0;
 			int i;
 
+			BUG_ON(disks > NDISKS);
+
 			/* data+Q failure.  Reconstruct data from P,
 			 * then rebuild syndrome
 			 */