author     Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 15:13:55 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 15:13:55 -0700
commit     9e3a25dc992dd9f3170fb643bdd95da5ca9c5576
tree       f636ae59fa83c83e837a6668b2693175a6e39f3a /arch/parisc
parent     9787aed57dd33ba5c15a713c2c50e78baeb5052d
parent     15ffe5e1acf5fe1512e98b20702e46ce9f25e2f7
download   linux-9e3a25dc992dd9f3170fb643bdd95da5ca9c5576.tar.gz
Merge tag 'dma-mapping-5.3' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:

 - move the USB special case that bounced DMA through a device BAR into
   the USB code instead of handling it in the common DMA code (Laurentiu
   Tudor and Fredrik Noring)

 - don't dip into the global CMA pool for single page allocations
   (Nicolin Chen)

 - fix a crash when allocating memory for the atomic pool failed during
   boot (Florian Fainelli)

 - move support for MIPS-style uncached segments to the common code and
   use that for MIPS and nios2 (me)

 - make support for DMA_ATTR_NON_CONSISTENT and
   DMA_ATTR_NO_KERNEL_MAPPING generic (me); a driver-side usage sketch
   follows this list

 - convert nds32 to the generic remapping allocator (me)
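
As a rough driver-side sketch of what the now-generic attribute handling
gives every architecture (hypothetical driver code, not part of this
series; dma_alloc_attrs() and dma_cache_sync() are existing interfaces):

    #include <linux/dma-mapping.h>

    /* Hypothetical helper: ask for DMA memory that the kernel may map
     * cacheable.  The device addresses it through *dma, the CPU through
     * the returned pointer, and keeping the two views coherent is the
     * driver's job. */
    static void *noncoherent_buf_alloc(struct device *dev, size_t size,
                                       dma_addr_t *dma)
    {
            return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                                   DMA_ATTR_NON_CONSISTENT);
    }

    /* Before handing the buffer to the device, push CPU writes out. */
    static void noncoherent_buf_to_device(struct device *dev, void *cpu,
                                          size_t size)
    {
            dma_cache_sync(dev, cpu, size, DMA_TO_DEVICE);
    }

DMA_ATTR_NO_KERNEL_MAPPING works the other way around: dma_alloc_attrs()
may return an opaque cookie with no usable kernel address, which is only
ever handed back to dma_free_attrs().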

* tag 'dma-mapping-5.3' of git://git.infradead.org/users/hch/dma-mapping: (29 commits)
  dma-mapping: mark dma_alloc_need_uncached as __always_inline
  MIPS: only select ARCH_HAS_UNCACHED_SEGMENT for non-coherent platforms
  usb: host: Fix excessive alignment restriction for local memory allocations
  lib/genalloc.c: Add algorithm, align and zeroed family of DMA allocators
  nios2: use the generic uncached segment support in dma-direct
  nds32: use the generic remapping allocator for coherent DMA allocations
  arc: use the generic remapping allocator for coherent DMA allocations
  dma-direct: handle DMA_ATTR_NO_KERNEL_MAPPING in common code
  dma-direct: handle DMA_ATTR_NON_CONSISTENT in common code
  dma-mapping: add a dma_alloc_need_uncached helper
  openrisc: remove the partial DMA_ATTR_NON_CONSISTENT support
  arc: remove the partial DMA_ATTR_NON_CONSISTENT support
  arm-nommu: remove the partial DMA_ATTR_NON_CONSISTENT support
  ARM: dma-mapping: allow larger DMA mask than supported
  dma-mapping: truncate dma masks to what dma_addr_t can hold
  iommu/dma: Apply dma_{alloc,free}_contiguous functions
  dma-remap: Avoid de-referencing NULL atomic_pool
  MIPS: use the generic uncached segment support in dma-direct
  dma-direct: provide generic support for uncached kernel segments
  au1100fb: fix DMA API abuse
  ...
Diffstat (limited to 'arch/parisc')
-rw-r--r--	arch/parisc/kernel/pci-dma.c	48
1 file changed, 13 insertions(+), 35 deletions(-)
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
index 239162355b58..ca35d9a76e50 100644
--- a/arch/parisc/kernel/pci-dma.c
+++ b/arch/parisc/kernel/pci-dma.c
@@ -394,17 +394,20 @@ pcxl_dma_init(void)
 
 __initcall(pcxl_dma_init);
 
-static void *pcxl_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	unsigned long vaddr;
 	unsigned long paddr;
 	int order;
 
+	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
+		return NULL;
+
 	order = get_order(size);
 	size = 1 << (order + PAGE_SHIFT);
 	vaddr = pcxl_alloc_range(size);
-	paddr = __get_free_pages(flag | __GFP_ZERO, order);
+	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
 	flush_kernel_dcache_range(paddr, size);
 	paddr = __pa(paddr);
 	map_uncached_pages(vaddr, size, paddr);
@@ -421,44 +424,19 @@ static void *pcxl_dma_alloc(struct device *dev, size_t size,
 	return (void *)vaddr;
 }
 
-static void *pcx_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
-{
-	void *addr;
-
-	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
-		return NULL;
-
-	addr = (void *)__get_free_pages(flag | __GFP_ZERO, get_order(size));
-	if (addr)
-		*dma_handle = (dma_addr_t)virt_to_phys(addr);
-
-	return addr;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl)
-		return pcxl_dma_alloc(dev, size, dma_handle, gfp, attrs);
-	else
-		return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
-}
-
 void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	int order = get_order(size);
 
-	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
-		size = 1 << (order + PAGE_SHIFT);
-		unmap_uncached_pages((unsigned long)vaddr, size);
-		pcxl_free_range((unsigned long)vaddr, size);
+	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
+		     boot_cpu_data.cpu_type != pcxl);
 
-		vaddr = __va(dma_handle);
-	}
-	free_pages((unsigned long)vaddr, get_order(size));
+	size = 1 << (order + PAGE_SHIFT);
+	unmap_uncached_pages((unsigned long)vaddr, size);
+	pcxl_free_range((unsigned long)vaddr, size);
+
+	free_pages((unsigned long)__va(dma_handle), order);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
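
To put the reworked hooks in context, a minimal driver-side allocation
(hypothetical code, not from this patch) lands in the arch_dma_alloc()
above when running on pcxl/pcxl2 and gets an uncached mapping from
pcxl_alloc_range(); on other parisc CPUs arch_dma_alloc() now returns
NULL, since plain DMA_ATTR_NON_CONSISTENT allocations, the only case the
removed pcx_dma_alloc() ever served, are handled by the common code:

    #include <linux/dma-mapping.h>

    /* Hypothetical ring-buffer allocation at probe time; on
     * non-coherent parisc this routes through arch_dma_alloc(). */
    static void *ring_alloc(struct device *dev, size_t size, dma_addr_t *dma)
    {
            return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
    }

    /* Matching teardown; routes through arch_dma_free(). */
    static void ring_free(struct device *dev, size_t size, void *cpu,
                          dma_addr_t dma)
    {
            dma_free_coherent(dev, size, cpu, dma);
    }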