Diffstat (limited to 'sound/core/memalloc.c')
 sound/core/memalloc.c | 207 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 195 insertions(+), 12 deletions(-)
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index c7c943c661e6..99cd0f67daa1 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/genalloc.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -39,9 +40,11 @@ static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 }
 
 /**
- * snd_dma_alloc_pages - allocate the buffer area according to the given type
+ * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
+ *	type and direction
  * @type: the DMA buffer type
  * @device: the device pointer
+ * @dir: DMA direction
  * @size: the buffer size to allocate
  * @dmab: buffer allocation record to store the allocated data
  *
@@ -51,8 +54,9 @@ static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
  * Return: Zero if the buffer with the given size is allocated successfully,
  * otherwise a negative value on error.
  */
-int snd_dma_alloc_pages(int type, struct device *device, size_t size,
-			struct snd_dma_buffer *dmab)
+int snd_dma_alloc_dir_pages(int type, struct device *device,
+			    enum dma_data_direction dir, size_t size,
+			    struct snd_dma_buffer *dmab)
 {
 	if (WARN_ON(!size))
 		return -ENXIO;
@@ -62,6 +66,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	size = PAGE_ALIGN(size);
 	dmab->dev.type = type;
 	dmab->dev.dev = device;
+	dmab->dev.dir = dir;
 	dmab->bytes = 0;
 	dmab->addr = 0;
 	dmab->private_data = NULL;
@@ -71,7 +76,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	dmab->bytes = size;
 	return 0;
 }
-EXPORT_SYMBOL(snd_dma_alloc_pages);
+EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
 
 /**
  * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
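
The new direction argument is stored in dmab->dev.dir, so the allocator can both pass it to the DMA API and let later sync calls no-op where possible. A minimal usage sketch, with a hypothetical device pointer my_dev and an illustrative type/size (the old snd_dma_alloc_pages() name presumably remains as a header-side wrapper passing DMA_BIDIRECTIONAL, though that change is outside this file):

	struct snd_dma_buffer dmab;
	int err;

	/* capture buffer: the device writes, the CPU reads */
	err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG, my_dev,
				      DMA_FROM_DEVICE, 64 * 1024, &dmab);
	if (err < 0)
		return err;
	/* ... use dmab.area (CPU side) and the per-chunk DMA addresses ... */
	snd_dma_free_pages(&dmab);
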
@@ -129,9 +134,10 @@ static void __snd_release_pages(struct device *dev, void *res)
 }
 
 /**
- * snd_devm_alloc_pages - allocate the buffer and manage with devres
+ * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
  * @dev: the device pointer
  * @type: the DMA buffer type
+ * @dir: DMA direction
  * @size: the buffer size to allocate
  *
  * Allocate buffer pages depending on the given type and manage using devres.
@@ -144,7 +150,8 @@ static void __snd_release_pages(struct device *dev, void *res)
  * The function returns the snd_dma_buffer object on success, or NULL on failure.
  */
 struct snd_dma_buffer *
-snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+snd_devm_alloc_dir_pages(struct device *dev, int type,
+			 enum dma_data_direction dir, size_t size)
 {
 	struct snd_dma_buffer *dmab;
 	int err;
@@ -157,7 +164,7 @@ snd_devm_alloc_pages(struct device *dev, int type, size_t size)
 	if (!dmab)
 		return NULL;
 
-	err = snd_dma_alloc_pages(type, dev, size, dmab);
+	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
 	if (err < 0) {
 		devres_free(dmab);
 		return NULL;
@@ -166,7 +173,7 @@ snd_devm_alloc_pages(struct device *dev, int type, size_t size)
 	devres_add(dev, dmab);
 	return dmab;
 }
-EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
+EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
 
 /**
  * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
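
The devres-managed variant takes the same direction argument; the buffer is released automatically when the driver is detached. A sketch of a probe-path allocation (card->dev, the type, and the size are placeholders):

	struct snd_dma_buffer *dmab;

	dmab = snd_devm_alloc_dir_pages(card->dev, SNDRV_DMA_TYPE_DEV,
					DMA_TO_DEVICE, 32 * 1024);
	if (!dmab)
		return -ENOMEM;
	/* no explicit snd_dma_free_pages(); devres frees it on detach */
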
@@ -185,6 +192,26 @@ int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
 }
 EXPORT_SYMBOL(snd_dma_buffer_mmap);
 
+#ifdef CONFIG_HAS_DMA
+/**
+ * snd_dma_buffer_sync - sync DMA buffer between CPU and device
+ * @dmab: buffer allocation information
+ * @mode: sync mode
+ */
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+			 enum snd_dma_sync_mode mode)
+{
+	const struct snd_malloc_ops *ops;
+
+	if (!dmab || !dmab->dev.need_sync)
+		return;
+	ops = snd_dma_get_ops(dmab);
+	if (ops && ops->sync)
+		ops->sync(dmab, mode);
+}
+EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
+#endif /* CONFIG_HAS_DMA */
+
 /**
  * snd_sgbuf_get_addr - return the physical address at the corresponding offset
  * @dmab: buffer allocation information
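
Because need_sync is recorded at allocation time, snd_dma_buffer_sync() is effectively free for coherent buffers and may be called unconditionally around CPU accesses. A sketch of a copy path (names are illustrative; SNDRV_DMA_SYNC_DEVICE is assumed to be the counterpart of SNDRV_DMA_SYNC_CPU in the new enum):

	/* before the CPU reads freshly captured data */
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);
	memcpy(dst, dmab->area + pos, bytes);

	/* after the CPU writes playback data */
	memcpy(dmab->area + pos, src, bytes);
	snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_DEVICE);
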
@@ -468,6 +495,161 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
 	.mmap = snd_dma_wc_mmap,
 };
 #endif /* CONFIG_X86 */
+
+/*
+ * Non-contiguous pages allocator
+ */
+static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	struct sg_table *sgt;
+	void *p;
+
+	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+				      DEFAULT_GFP, 0);
+	if (!sgt)
+		return NULL;
+	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
+					    sg_dma_address(sgt->sgl));
+	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
+	if (p)
+		dmab->private_data = sgt;
+	else
+		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+	return p;
+}
+
+static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
+{
+	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
+	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
+			       dmab->dev.dir);
+}
+
+static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
+				  struct vm_area_struct *area)
+{
+	return dma_mmap_noncontiguous(dmab->dev.dev, area,
+				      dmab->bytes, dmab->private_data);
+}
+
+static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
+				   enum snd_dma_sync_mode mode)
+{
+	if (mode == SNDRV_DMA_SYNC_CPU) {
+		if (dmab->dev.dir == DMA_TO_DEVICE)
+			return;
+		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
+					 dmab->dev.dir);
+		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
+	} else {
+		if (dmab->dev.dir == DMA_FROM_DEVICE)
+			return;
+		flush_kernel_vmap_range(dmab->area, dmab->bytes);
+		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
+					    dmab->dev.dir);
+	}
+}
+
+static const struct snd_malloc_ops snd_dma_noncontig_ops = {
+	.alloc = snd_dma_noncontig_alloc,
+	.free = snd_dma_noncontig_free,
+	.mmap = snd_dma_noncontig_mmap,
+	.sync = snd_dma_noncontig_sync,
+	/* re-use vmalloc helpers for get_* ops */
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
+/* x86-specific SG-buffer with WC pages */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
+
+static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	void *p = snd_dma_noncontig_alloc(dmab, size);
+	size_t ofs;
+
+	if (!p)
+		return NULL;
+	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
+		set_memory_wc(vmalloc_to_virt(p + ofs), 1);
+	return p;
+}
+
+static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
+{
+	size_t ofs;
+
+	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
+		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
+	snd_dma_noncontig_free(dmab);
+}
+
+static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
+			      struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	/* FIXME: dma_mmap_noncontiguous() works? */
+	return -ENOENT; /* continue with the default mmap handler */
+}
+
+static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+	.alloc = snd_dma_sg_wc_alloc,
+	.free = snd_dma_sg_wc_free,
+	.mmap = snd_dma_sg_wc_mmap,
+	.sync = snd_dma_noncontig_sync,
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+#endif /* CONFIG_SND_DMA_SGBUF */
+
+/*
+ * Non-coherent pages allocator
+ */
+static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	void *p;
+
+	p = dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+				  dmab->dev.dir, DEFAULT_GFP);
+	if (p)
+		dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->addr);
+	return p;
+}
+
+static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
+{
+	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
+			     dmab->addr, dmab->dev.dir);
+}
+
+static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
+				    struct vm_area_struct *area)
+{
+	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
+	return dma_mmap_pages(dmab->dev.dev, area,
+			      area->vm_end - area->vm_start,
+			      virt_to_page(dmab->area));
+}
+
+static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
+				     enum snd_dma_sync_mode mode)
+{
+	if (mode == SNDRV_DMA_SYNC_CPU) {
+		if (dmab->dev.dir != DMA_TO_DEVICE)
+			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
+						dmab->bytes, dmab->dev.dir);
+	} else {
+		if (dmab->dev.dir != DMA_FROM_DEVICE)
+			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
+						   dmab->bytes, dmab->dev.dir);
+	}
+}
+
+static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
+	.alloc = snd_dma_noncoherent_alloc,
+	.free = snd_dma_noncoherent_free,
+	.mmap = snd_dma_noncoherent_mmap,
+	.sync = snd_dma_noncoherent_sync,
+};
+
 #endif /* CONFIG_HAS_DMA */
 
 /*
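
A non-contiguous buffer is contiguous only in the CPU mapping created by dma_vmap_noncontiguous(); DMA addresses must be looked up chunk by chunk through the get_addr/get_chunk_size ops. A sketch of programming a hypothetical buffer-descriptor list from such a buffer (my_dev and program_bdl_entry() are made-up names):

	static int my_setup_bdl(struct device *my_dev, size_t bytes)
	{
		struct snd_dma_buffer dmab;
		size_t ofs;
		int err;

		err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG, my_dev,
					      DMA_BIDIRECTIONAL, bytes, &dmab);
		if (err < 0)
			return err;

		/* a chunk may span several contiguous pages; step by chunk */
		for (ofs = 0; ofs < dmab.bytes; ) {
			dma_addr_t addr = snd_sgbuf_get_addr(&dmab, ofs);
			unsigned int len = snd_sgbuf_get_chunk_size(&dmab, ofs,
								    dmab.bytes - ofs);

			program_bdl_entry(addr, len);
			ofs += len;
		}
		return 0;
	}
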
@@ -479,14 +661,15 @@ static const struct snd_malloc_ops *dma_ops[] = {
 #ifdef CONFIG_HAS_DMA
 	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
 	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
+	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
+	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
+#ifdef CONFIG_SND_DMA_SGBUF
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
+#endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 #endif /* CONFIG_HAS_DMA */
-#ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
-	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
-#endif
 };
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
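
Note that the removal of snd_dma_sg_ops leaves SNDRV_DMA_TYPE_DEV_SG without a table entry; for snd_dma_get_ops() to keep resolving that type for existing drivers, the header presumably aliases it to the new non-contiguous allocator, along these lines (an assumption about the companion header change, not shown in this diff):

	/* hypothetical header-side fallback for the removed snd_dma_sg_ops */
	#define SNDRV_DMA_TYPE_DEV_SG	SNDRV_DMA_TYPE_NONCONTIG
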