author     YueHaibing <yuehaibing@huawei.com>    2018-06-03 17:32:22 +0800
committer  Jason Gunthorpe <jgg@mellanox.com>    2018-06-04 10:50:04 -0600
commit     8c61b24585c44e1de337e45858129abce9c3a008 (patch)
tree       bb47f261166b8d4ab32458ba2c270407428a532e /drivers/infiniband
parent     cb2595c1393b4a5211534e6f0a0fbad369e21ad8 (diff)
IB/hns: Use zeroing memory allocator instead of allocator/memset
Use dma_zalloc_coherent() for allocating zeroed memory and remove the
now-unnecessary memset() calls.

Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
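
Not part of the patch itself, but as a minimal sketch of the conversion being
applied (the example_alloc_old()/example_alloc_new() helpers below are
hypothetical illustrations, not code from the hns driver):

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Before: open-coded allocate-then-clear. */
static void *example_alloc_old(struct device *dev, size_t size,
			       dma_addr_t *handle)
{
	void *buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);

	if (!buf)
		return NULL;
	memset(buf, 0, size);	/* explicit zeroing after the allocation */
	return buf;
}

/* After: dma_zalloc_coherent() hands back already-zeroed memory. */
static void *example_alloc_new(struct device *dev, size_t size,
			       dma_addr_t *handle)
{
	return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}

The zeroing is folded into the allocation call, which is why the standalone
memset() after each dma_alloc_coherent() in hns_roce_buf_alloc() can simply
be dropped.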
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/hw/hns/hns_roce_alloc.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index a40ec939ece5..46f65f9f59d0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,7 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 		buf->npages = 1 << order;
 		buf->page_shift = page_shift;
 		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
-		buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
+		buf->direct.buf = dma_zalloc_coherent(dev,
+						      size, &t, GFP_KERNEL);
 		if (!buf->direct.buf)
 			return -ENOMEM;
 
@@ -207,8 +208,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			--buf->page_shift;
 			buf->npages *= 2;
 		}
-
-		memset(buf->direct.buf, 0, size);
 	} else {
 		buf->nbufs = (size + page_size - 1) / page_size;
 		buf->npages = buf->nbufs;
@@ -220,7 +219,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->page_list[i].buf = dma_alloc_coherent(dev,
+			buf->page_list[i].buf = dma_zalloc_coherent(dev,
 								  page_size, &t,
 								  GFP_KERNEL);
 
@@ -228,7 +227,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 				goto err_free;
 
 			buf->page_list[i].map = t;
-			memset(buf->page_list[i].buf, 0, page_size);
 		}
 	}