author     Takashi Iwai <tiwai@suse.de>    2021-11-01 07:33:49 +0100
committer  Takashi Iwai <tiwai@suse.de>    2021-11-01 07:34:19 +0100
commit     8beea313507580ea2a18bf68f7589d40677c28ad (patch)
tree       0a1cd76a9b2a414f9dcb64db2432cbf964be3a44 /sound/core
parent     3ab7992018455ac63c33e9b3eaa7264e293e40f4 (diff)
parent     2672e1970ab051f0333fdbc61a55b7616f4f5778 (diff)
download   linux-8beea313507580ea2a18bf68f7589d40677c28ad.tar.gz
Merge branch 'for-next' into for-linus
Merge 5.16-devel branch for upstreaming

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Diffstat (limited to 'sound/core')
-rw-r--r--  sound/core/Makefile          |   3
-rw-r--r--  sound/core/memalloc.c        | 207
-rw-r--r--  sound/core/memalloc_local.h  |   1
-rw-r--r--  sound/core/pcm_compat.c      |   4
-rw-r--r--  sound/core/pcm_lib.c         |   5
-rw-r--r--  sound/core/pcm_local.h       |   7
-rw-r--r--  sound/core/pcm_memory.c      |  13
-rw-r--r--  sound/core/pcm_native.c      |  66
-rw-r--r--  sound/core/sgbuf.c           | 201
9 files changed, 270 insertions, 237 deletions
diff --git a/sound/core/Makefile b/sound/core/Makefile
index d774792850f3..350d704ced98 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -9,7 +9,9 @@ ifneq ($(CONFIG_SND_PROC_FS),)
 snd-y += info.o
 snd-$(CONFIG_SND_OSSEMUL) += info_oss.o
 endif
+ifneq ($(CONFIG_M68K),y)
 snd-$(CONFIG_ISA_DMA_API) += isadma.o
+endif
 snd-$(CONFIG_SND_OSSEMUL) += sound_oss.o
 snd-$(CONFIG_SND_VMASTER) += vmaster.o
 snd-$(CONFIG_SND_JACK)	  += ctljack.o jack.o
@@ -17,7 +19,6 @@ snd-$(CONFIG_SND_JACK)	  += ctljack.o jack.o
 snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \
 		pcm_memory.o memalloc.o
 snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o
-snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
 snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o
 snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o
 
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index c7c943c661e6..99cd0f67daa1 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/genalloc.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
@@ -39,9 +40,11 @@ static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
 }
 
 /**
- * snd_dma_alloc_pages - allocate the buffer area according to the given type
+ * snd_dma_alloc_dir_pages - allocate the buffer area according to the given
+ *	type and direction
  * @type: the DMA buffer type
  * @device: the device pointer
+ * @dir: DMA direction
  * @size: the buffer size to allocate
  * @dmab: buffer allocation record to store the allocated data
  *
@@ -51,8 +54,9 @@ static void *__snd_dma_alloc_pages(struct snd_dma_buffer *dmab, size_t size)
  * Return: Zero if the buffer with the given size is allocated successfully,
  * otherwise a negative value on error.
  */
-int snd_dma_alloc_pages(int type, struct device *device, size_t size,
-			struct snd_dma_buffer *dmab)
+int snd_dma_alloc_dir_pages(int type, struct device *device,
+			    enum dma_data_direction dir, size_t size,
+			    struct snd_dma_buffer *dmab)
 {
 	if (WARN_ON(!size))
 		return -ENXIO;
@@ -62,6 +66,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	size = PAGE_ALIGN(size);
 	dmab->dev.type = type;
 	dmab->dev.dev = device;
+	dmab->dev.dir = dir;
 	dmab->bytes = 0;
 	dmab->addr = 0;
 	dmab->private_data = NULL;
@@ -71,7 +76,7 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	dmab->bytes = size;
 	return 0;
 }
-EXPORT_SYMBOL(snd_dma_alloc_pages);
+EXPORT_SYMBOL(snd_dma_alloc_dir_pages);
 
 /**
  * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
@@ -129,9 +134,10 @@ static void __snd_release_pages(struct device *dev, void *res)
 }
 
 /**
- * snd_devm_alloc_pages - allocate the buffer and manage with devres
+ * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres
  * @dev: the device pointer
  * @type: the DMA buffer type
+ * @dir: DMA direction
  * @size: the buffer size to allocate
  *
  * Allocate buffer pages depending on the given type and manage using devres.
@@ -144,7 +150,8 @@ static void __snd_release_pages(struct device *dev, void *res)
  * The function returns the snd_dma_buffer object at success, or NULL if failed.
  */
 struct snd_dma_buffer *
-snd_devm_alloc_pages(struct device *dev, int type, size_t size)
+snd_devm_alloc_dir_pages(struct device *dev, int type,
+			 enum dma_data_direction dir, size_t size)
 {
 	struct snd_dma_buffer *dmab;
 	int err;
@@ -157,7 +164,7 @@ snd_devm_alloc_pages(struct device *dev, int type, size_t size)
 	if (!dmab)
 		return NULL;
 
-	err = snd_dma_alloc_pages(type, dev, size, dmab);
+	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
 	if (err < 0) {
 		devres_free(dmab);
 		return NULL;
@@ -166,7 +173,7 @@ snd_devm_alloc_pages(struct device *dev, int type, size_t size)
 	devres_add(dev, dmab);
 	return dmab;
 }
-EXPORT_SYMBOL_GPL(snd_devm_alloc_pages);
+EXPORT_SYMBOL_GPL(snd_devm_alloc_dir_pages);
 
 /**
  * snd_dma_buffer_mmap - perform mmap of the given DMA buffer
@@ -185,6 +192,26 @@ int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab,
 }
 EXPORT_SYMBOL(snd_dma_buffer_mmap);
 
+#ifdef CONFIG_HAS_DMA
+/**
+ * snd_dma_buffer_sync - sync DMA buffer between CPU and device
+ * @dmab: buffer allocation information
+ * @mode: sync mode
+ */
+void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
+			 enum snd_dma_sync_mode mode)
+{
+	const struct snd_malloc_ops *ops;
+
+	if (!dmab || !dmab->dev.need_sync)
+		return;
+	ops = snd_dma_get_ops(dmab);
+	if (ops && ops->sync)
+		ops->sync(dmab, mode);
+}
+EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
+#endif /* CONFIG_HAS_DMA */
+
 /**
  * snd_sgbuf_get_addr - return the physical address at the corresponding offset
  * @dmab: buffer allocation information
@@ -468,6 +495,161 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
 	.mmap = snd_dma_wc_mmap,
 };
 #endif /* CONFIG_X86 */
+
+/*
+ * Non-contiguous pages allocator
+ */
+static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	struct sg_table *sgt;
+	void *p;
+
+	sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
+				      DEFAULT_GFP, 0);
+	if (!sgt)
+		return NULL;
+	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+	p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
+	if (p)
+		dmab->private_data = sgt;
+	else
+		dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+	return p;
+}
+
+static void snd_dma_noncontig_free(struct snd_dma_buffer *dmab)
+{
+	dma_vunmap_noncontiguous(dmab->dev.dev, dmab->area);
+	dma_free_noncontiguous(dmab->dev.dev, dmab->bytes, dmab->private_data,
+			       dmab->dev.dir);
+}
+
+static int snd_dma_noncontig_mmap(struct snd_dma_buffer *dmab,
+				  struct vm_area_struct *area)
+{
+	return dma_mmap_noncontiguous(dmab->dev.dev, area,
+				      dmab->bytes, dmab->private_data);
+}
+
+static void snd_dma_noncontig_sync(struct snd_dma_buffer *dmab,
+				   enum snd_dma_sync_mode mode)
+{
+	if (mode == SNDRV_DMA_SYNC_CPU) {
+		if (dmab->dev.dir == DMA_TO_DEVICE)
+			return;
+		dma_sync_sgtable_for_cpu(dmab->dev.dev, dmab->private_data,
+					 dmab->dev.dir);
+		invalidate_kernel_vmap_range(dmab->area, dmab->bytes);
+	} else {
+		if (dmab->dev.dir == DMA_FROM_DEVICE)
+			return;
+		flush_kernel_vmap_range(dmab->area, dmab->bytes);
+		dma_sync_sgtable_for_device(dmab->dev.dev, dmab->private_data,
+					    dmab->dev.dir);
+	}
+}
+
+static const struct snd_malloc_ops snd_dma_noncontig_ops = {
+	.alloc = snd_dma_noncontig_alloc,
+	.free = snd_dma_noncontig_free,
+	.mmap = snd_dma_noncontig_mmap,
+	.sync = snd_dma_noncontig_sync,
+	/* re-use vmalloc helpers for get_* ops */
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+
+/* x86-specific SG-buffer with WC pages */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
+
+static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	void *p = snd_dma_noncontig_alloc(dmab, size);
+	size_t ofs;
+
+	if (!p)
+		return NULL;
+	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
+		set_memory_uc(vmalloc_to_virt(p + ofs), 1);
+	return p;
+}
+
+static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
+{
+	size_t ofs;
+
+	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
+		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
+	snd_dma_noncontig_free(dmab);
+}
+
+static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
+			      struct vm_area_struct *area)
+{
+	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	/* FIXME: dma_mmap_noncontiguous() works? */
+	return -ENOENT; /* continue with the default mmap handler */
+}
+
+const struct snd_malloc_ops snd_dma_sg_wc_ops = {
+	.alloc = snd_dma_sg_wc_alloc,
+	.free = snd_dma_sg_wc_free,
+	.mmap = snd_dma_sg_wc_mmap,
+	.sync = snd_dma_noncontig_sync,
+	.get_addr = snd_dma_vmalloc_get_addr,
+	.get_page = snd_dma_vmalloc_get_page,
+	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
+#endif /* CONFIG_SND_DMA_SGBUF */
+
+/*
+ * Non-coherent pages allocator
+ */
+static void *snd_dma_noncoherent_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	dmab->dev.need_sync = dma_need_sync(dmab->dev.dev, dmab->dev.dir);
+	return dma_alloc_noncoherent(dmab->dev.dev, size, &dmab->addr,
+				     dmab->dev.dir, DEFAULT_GFP);
+}
+
+static void snd_dma_noncoherent_free(struct snd_dma_buffer *dmab)
+{
+	dma_free_noncoherent(dmab->dev.dev, dmab->bytes, dmab->area,
+			     dmab->addr, dmab->dev.dir);
+}
+
+static int snd_dma_noncoherent_mmap(struct snd_dma_buffer *dmab,
+				    struct vm_area_struct *area)
+{
+	area->vm_page_prot = vm_get_page_prot(area->vm_flags);
+	return dma_mmap_pages(dmab->dev.dev, area,
+			      area->vm_end - area->vm_start,
+			      virt_to_page(dmab->area));
+}
+
+static void snd_dma_noncoherent_sync(struct snd_dma_buffer *dmab,
+				     enum snd_dma_sync_mode mode)
+{
+	if (mode == SNDRV_DMA_SYNC_CPU) {
+		if (dmab->dev.dir != DMA_TO_DEVICE)
+			dma_sync_single_for_cpu(dmab->dev.dev, dmab->addr,
+						dmab->bytes, dmab->dev.dir);
+	} else {
+		if (dmab->dev.dir != DMA_FROM_DEVICE)
+			dma_sync_single_for_device(dmab->dev.dev, dmab->addr,
+						   dmab->bytes, dmab->dev.dir);
+	}
+}
+
+static const struct snd_malloc_ops snd_dma_noncoherent_ops = {
+	.alloc = snd_dma_noncoherent_alloc,
+	.free = snd_dma_noncoherent_free,
+	.mmap = snd_dma_noncoherent_mmap,
+	.sync = snd_dma_noncoherent_sync,
+};
+
 #endif /* CONFIG_HAS_DMA */
 
 /*
@@ -479,14 +661,15 @@ static const struct snd_malloc_ops *dma_ops[] = {
 #ifdef CONFIG_HAS_DMA
 	[SNDRV_DMA_TYPE_DEV] = &snd_dma_dev_ops,
 	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
+	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
+	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
+#ifdef CONFIG_SND_DMA_SGBUF
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
+#endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 #endif /* CONFIG_HAS_DMA */
-#ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
-	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
-#endif
 };
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
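
(Illustrative sketch, not part of the commit: how a driver might use the new
snd_dma_alloc_dir_pages()/snd_dma_buffer_sync() pair introduced above. The
device pointer, the 64 KiB size and the function name are made up for the
example; only the ALSA calls shown in this file are assumed.)

	#include <linux/dma-mapping.h>
	#include <sound/memalloc.h>

	/* Hypothetical driver snippet: allocate a capture DMA buffer with an
	 * explicit direction, sync it toward the CPU before reading, free it.
	 */
	static int example_alloc_capture_buffer(struct device *dev,
						struct snd_dma_buffer *dmab)
	{
		int err;

		/* non-contiguous pages; capture stream -> DMA_FROM_DEVICE */
		err = snd_dma_alloc_dir_pages(SNDRV_DMA_TYPE_NONCONTIG, dev,
					      DMA_FROM_DEVICE, 64 * 1024, dmab);
		if (err < 0)
			return err;

		/* ... the DMA engine fills dmab->area ... */

		/* make device-written data visible to the CPU; this is a no-op
		 * unless dmab->dev.need_sync was set at allocation time
		 */
		snd_dma_buffer_sync(dmab, SNDRV_DMA_SYNC_CPU);

		/* ... read samples from dmab->area ... */

		snd_dma_free_pages(dmab);
		return 0;
	}
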
diff --git a/sound/core/memalloc_local.h b/sound/core/memalloc_local.h
index 9f2e0a608b49..a6f3a87194da 100644
--- a/sound/core/memalloc_local.h
+++ b/sound/core/memalloc_local.h
@@ -10,6 +10,7 @@ struct snd_malloc_ops {
 	unsigned int (*get_chunk_size)(struct snd_dma_buffer *dmab,
 				       unsigned int ofs, unsigned int size);
 	int (*mmap)(struct snd_dma_buffer *dmab, struct vm_area_struct *area);
+	void (*sync)(struct snd_dma_buffer *dmab, enum snd_dma_sync_mode mode);
 };
 
 #ifdef CONFIG_SND_DMA_SGBUF
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index dfe5a64e19d2..e4e176854ce7 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -453,6 +453,8 @@ static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
 	sstatus.suspended_state = status->suspended_state;
 	sstatus.audio_tstamp = status->audio_tstamp;
 	snd_pcm_stream_unlock_irq(substream);
+	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 	if (put_user(sstatus.state, &src->s.status.state) ||
 	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
 	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
@@ -533,6 +535,8 @@ static int snd_pcm_ioctl_sync_ptr_buggy(struct snd_pcm_substream *substream,
 	sync_ptr.s.status.suspended_state = status->suspended_state;
 	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
 	snd_pcm_stream_unlock_irq(substream);
+	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
+		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
 		return -EFAULT;
 	return 0;
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a144a3f68e9e..4f4b4739f987 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -106,6 +106,7 @@ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_ufram
 		frames -= transfer;
 		ofs = 0;
 	}
+	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 }
 
 #ifdef CONFIG_SND_DEBUG
@@ -2256,8 +2257,12 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
 			goto _end_unlock;
 		}
 		snd_pcm_stream_unlock_irq(substream);
+		if (!is_playback)
+			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
 		err = writer(substream, appl_ofs, data, offset, frames,
 			     transfer);
+		if (is_playback)
+			snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 		snd_pcm_stream_lock_irq(substream);
 		if (err < 0)
 			goto _end_unlock;
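
(Illustrative sketch, not part of the commit: the hunk above follows the usual
streaming-DMA ownership rule — hand the buffer to the CPU before reading
captured data, hand it back to the device after writing playback data. A
generic restatement of that rule with the raw DMA API; the device, handle,
length and function name are hypothetical.)

	#include <linux/dma-mapping.h>

	/* Hypothetical: buf was mapped with dma_map_single(dev, ..., dir) */
	static void example_touch_buffer(struct device *dev, dma_addr_t handle,
					 void *cpu_buf, size_t len, bool playback)
	{
		if (!playback) {
			/* capture: claim the buffer for the CPU before reading */
			dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
			/* ... copy samples out of cpu_buf ... */
		} else {
			/* ... copy samples into cpu_buf ... */
			/* playback: return the buffer to the device after writing */
			dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
		}
	}
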
diff --git a/sound/core/pcm_local.h b/sound/core/pcm_local.h
index fe9689b8a6a6..ecb21697ae3a 100644
--- a/sound/core/pcm_local.h
+++ b/sound/core/pcm_local.h
@@ -73,4 +73,11 @@ void snd_pcm_sync_stop(struct snd_pcm_substream *substream, bool sync_irq);
 		for ((subs) = (pcm)->streams[str].substream; (subs); \
 		     (subs) = (subs)->next)
 
+static inline void snd_pcm_dma_buffer_sync(struct snd_pcm_substream *substream,
+					   enum snd_dma_sync_mode mode)
+{
+	if (substream->runtime->info & SNDRV_PCM_INFO_EXPLICIT_SYNC)
+		snd_dma_buffer_sync(snd_pcm_get_dma_buf(substream), mode);
+}
+
 #endif	/* __SOUND_CORE_PCM_LOCAL_H */
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index 7fbd1ccbb5b0..b70ce3b69ab4 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -32,15 +32,20 @@ module_param(max_alloc_per_card, ulong, 0644);
 MODULE_PARM_DESC(max_alloc_per_card, "Max total allocation bytes per card.");
 
 static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
-			  size_t size, struct snd_dma_buffer *dmab)
+			  int str, size_t size, struct snd_dma_buffer *dmab)
 {
+	enum dma_data_direction dir;
 	int err;
 
 	if (max_alloc_per_card &&
 	    card->total_pcm_alloc_bytes + size > max_alloc_per_card)
 		return -ENOMEM;
 
-	err = snd_dma_alloc_pages(type, dev, size, dmab);
+	if (str == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = DMA_TO_DEVICE;
+	else
+		dir = DMA_FROM_DEVICE;
+	err = snd_dma_alloc_dir_pages(type, dev, dir, size, dmab);
 	if (!err) {
 		mutex_lock(&card->memory_mutex);
 		card->total_pcm_alloc_bytes += dmab->bytes;
@@ -77,7 +82,7 @@ static int preallocate_pcm_pages(struct snd_pcm_substream *substream,
 
 	do {
 		err = do_alloc_pages(card, dmab->dev.type, dmab->dev.dev,
-				     size, dmab);
+				     substream->stream, size, dmab);
 		if (err != -ENOMEM)
 			return err;
 		if (no_fallback)
@@ -177,6 +182,7 @@ static void snd_pcm_lib_preallocate_proc_write(struct snd_info_entry *entry,
 			if (do_alloc_pages(card,
 					   substream->dma_buffer.dev.type,
 					   substream->dma_buffer.dev.dev,
+					   substream->stream,
 					   size, &new_dmab) < 0) {
 				buffer->error = -ENOMEM;
 				pr_debug("ALSA pcmC%dD%d%c,%d:%s: cannot preallocate for size %zu\n",
@@ -418,6 +424,7 @@ int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size)
 		if (do_alloc_pages(card,
 				   substream->dma_buffer.dev.type,
 				   substream->dma_buffer.dev.dev,
+				   substream->stream,
 				   size, dmab) < 0) {
 			kfree(dmab);
 			pr_debug("ALSA pcmC%dD%d%c,%d:%s: cannot preallocate for size %zu\n",
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index d233cb3b41d8..621883e71194 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2685,6 +2685,13 @@ int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
 		goto error;
 	}
 
+	/* automatically set EXPLICIT_SYNC flag in the managed mode whenever
+	 * the DMA buffer requires it
+	 */
+	if (substream->managed_buffer_alloc &&
+	    substream->dma_buffer.dev.need_sync)
+		substream->runtime->hw.info |= SNDRV_PCM_INFO_EXPLICIT_SYNC;
+
 	*rsubstream = substream;
 	return 0;
 
@@ -2912,6 +2919,8 @@ static snd_pcm_sframes_t snd_pcm_rewind(struct snd_pcm_substream *substream,
 		ret = rewind_appl_ptr(substream, frames,
 				      snd_pcm_hw_avail(substream));
 	snd_pcm_stream_unlock_irq(substream);
+	if (ret >= 0)
+		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 	return ret;
 }
 
@@ -2929,35 +2938,31 @@ static snd_pcm_sframes_t snd_pcm_forward(struct snd_pcm_substream *substream,
 		ret = forward_appl_ptr(substream, frames,
 				       snd_pcm_avail(substream));
 	snd_pcm_stream_unlock_irq(substream);
+	if (ret >= 0)
+		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 	return ret;
 }
 
-static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
-{
-	int err;
-
-	snd_pcm_stream_lock_irq(substream);
-	err = do_pcm_hwsync(substream);
-	snd_pcm_stream_unlock_irq(substream);
-	return err;
-}
-		
 static int snd_pcm_delay(struct snd_pcm_substream *substream,
 			 snd_pcm_sframes_t *delay)
 {
 	int err;
-	snd_pcm_sframes_t n = 0;
 
 	snd_pcm_stream_lock_irq(substream);
 	err = do_pcm_hwsync(substream);
-	if (!err)
-		n = snd_pcm_calc_delay(substream);
+	if (delay && !err)
+		*delay = snd_pcm_calc_delay(substream);
 	snd_pcm_stream_unlock_irq(substream);
-	if (!err)
-		*delay = n;
+	snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_CPU);
+
 	return err;
 }
 		
+static inline int snd_pcm_hwsync(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_delay(substream, NULL);
+}
+
 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
 			    struct snd_pcm_sync_ptr __user *_sync_ptr)
 {
@@ -3000,6 +3005,8 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
 	sync_ptr.s.status.suspended_state = status->suspended_state;
 	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
 	snd_pcm_stream_unlock_irq(substream);
+	if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
+		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
 		return -EFAULT;
 	return 0;
@@ -3096,6 +3103,8 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
 	sstatus.suspended_state = status->suspended_state;
 	sstatus.audio_tstamp = status->audio_tstamp;
 	snd_pcm_stream_unlock_irq(substream);
+	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
+		snd_pcm_dma_buffer_sync(substream, SNDRV_DMA_SYNC_DEVICE);
 	if (put_user(sstatus.state, &src->s.status.state) ||
 	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
 	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp_sec) ||
@@ -3218,6 +3227,9 @@ static int snd_pcm_common_ioctl(struct file *file,
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 
+	if (substream->runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
+		return -EBADFD;
+
 	res = snd_power_wait(substream->pcm->card);
 	if (res < 0)
 		return res;
@@ -3272,7 +3284,7 @@ static int snd_pcm_common_ioctl(struct file *file,
 		return snd_pcm_hwsync(substream);
 	case SNDRV_PCM_IOCTL_DELAY:
 	{
-		snd_pcm_sframes_t delay;
+		snd_pcm_sframes_t delay = 0;
 		snd_pcm_sframes_t __user *res = arg;
 		int err;
 
@@ -3344,6 +3356,9 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
 	snd_pcm_uframes_t *frames = arg;
 	snd_pcm_sframes_t result;
 	
+	if (substream->runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
+		return -EBADFD;
+
 	switch (cmd) {
 	case SNDRV_PCM_IOCTL_FORWARD:
 	{
@@ -3386,7 +3401,8 @@ static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
+	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
 		return -EBADFD;
 	if (!frame_aligned(runtime, count))
 		return -EINVAL;
@@ -3410,7 +3426,8 @@ static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
+	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
 		return -EBADFD;
 	if (!frame_aligned(runtime, count))
 		return -EINVAL;
@@ -3436,7 +3453,8 @@ static ssize_t snd_pcm_readv(struct kiocb *iocb, struct iov_iter *to)
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
+	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
 		return -EBADFD;
 	if (!iter_is_iovec(to))
 		return -EINVAL;
@@ -3472,7 +3490,8 @@ static ssize_t snd_pcm_writev(struct kiocb *iocb, struct iov_iter *from)
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
-	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
+	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
 		return -EBADFD;
 	if (!iter_is_iovec(from))
 		return -EINVAL;
@@ -3511,6 +3530,9 @@ static __poll_t snd_pcm_poll(struct file *file, poll_table *wait)
 		return ok | EPOLLERR;
 
 	runtime = substream->runtime;
+	if (runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
+		return ok | EPOLLERR;
+
 	poll_wait(file, &runtime->sleep, wait);
 
 	mask = 0;
@@ -3820,6 +3842,8 @@ static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
 	substream = pcm_file->substream;
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
+	if (substream->runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
+		return -EBADFD;
 
 	offset = area->vm_pgoff << PAGE_SHIFT;
 	switch (offset) {
@@ -3856,6 +3880,8 @@ static int snd_pcm_fasync(int fd, struct file * file, int on)
 	if (PCM_RUNTIME_CHECK(substream))
 		return -ENXIO;
 	runtime = substream->runtime;
+	if (runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
+		return -EBADFD;
 	return fasync_helper(fd, file, on, &runtime->fasync);
 }
 
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
deleted file mode 100644
index 8352a5cdb19f..000000000000
--- a/sound/core/sgbuf.c
+++ /dev/null
@@ -1,201 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Scatter-Gather buffer
- *
- *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
- */
-
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <sound/memalloc.h>
-#include "memalloc_local.h"
-
-struct snd_sg_page {
-	void *buf;
-	dma_addr_t addr;
-};
-
-struct snd_sg_buf {
-	int size;	/* allocated byte size */
-	int pages;	/* allocated pages */
-	int tblsize;	/* allocated table size */
-	struct snd_sg_page *table;	/* address table */
-	struct page **page_table;	/* page table (for vmap/vunmap) */
-	struct device *dev;
-};
-
-/* table entries are align to 32 */
-#define SGBUF_TBL_ALIGN		32
-#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
-
-static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	struct snd_dma_buffer tmpb;
-	int i;
-
-	if (!sgbuf)
-		return;
-
-	vunmap(dmab->area);
-	dmab->area = NULL;
-
-	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
-	tmpb.dev.dev = sgbuf->dev;
-	for (i = 0; i < sgbuf->pages; i++) {
-		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
-			continue; /* continuous pages */
-		tmpb.area = sgbuf->table[i].buf;
-		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
-		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
-		snd_dma_free_pages(&tmpb);
-	}
-
-	kfree(sgbuf->table);
-	kfree(sgbuf->page_table);
-	kfree(sgbuf);
-	dmab->private_data = NULL;
-}
-
-#define MAX_ALLOC_PAGES		32
-
-static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
-{
-	struct snd_sg_buf *sgbuf;
-	unsigned int i, pages, chunk, maxpages;
-	struct snd_dma_buffer tmpb;
-	struct snd_sg_page *table;
-	struct page **pgtable;
-	int type = SNDRV_DMA_TYPE_DEV;
-	pgprot_t prot = PAGE_KERNEL;
-	void *area;
-
-	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
-	if (!sgbuf)
-		return NULL;
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
-		type = SNDRV_DMA_TYPE_DEV_WC;
-#ifdef pgprot_noncached
-		prot = pgprot_noncached(PAGE_KERNEL);
-#endif
-	}
-	sgbuf->dev = dmab->dev.dev;
-	pages = snd_sgbuf_aligned_pages(size);
-	sgbuf->tblsize = sgbuf_align_table(pages);
-	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
-	if (!table)
-		goto _failed;
-	sgbuf->table = table;
-	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
-	if (!pgtable)
-		goto _failed;
-	sgbuf->page_table = pgtable;
-
-	/* allocate pages */
-	maxpages = MAX_ALLOC_PAGES;
-	while (pages > 0) {
-		chunk = pages;
-		/* don't be too eager to take a huge chunk */
-		if (chunk > maxpages)
-			chunk = maxpages;
-		chunk <<= PAGE_SHIFT;
-		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
-						 chunk, &tmpb) < 0) {
-			if (!sgbuf->pages)
-				goto _failed;
-			size = sgbuf->pages * PAGE_SIZE;
-			break;
-		}
-		chunk = tmpb.bytes >> PAGE_SHIFT;
-		for (i = 0; i < chunk; i++) {
-			table->buf = tmpb.area;
-			table->addr = tmpb.addr;
-			if (!i)
-				table->addr |= chunk; /* mark head */
-			table++;
-			*pgtable++ = virt_to_page(tmpb.area);
-			tmpb.area += PAGE_SIZE;
-			tmpb.addr += PAGE_SIZE;
-		}
-		sgbuf->pages += chunk;
-		pages -= chunk;
-		if (chunk < maxpages)
-			maxpages = chunk;
-	}
-
-	sgbuf->size = size;
-	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
-	if (!area)
-		goto _failed;
-	return area;
-
- _failed:
-	snd_dma_sg_free(dmab); /* free the table */
-	return NULL;
-}
-
-static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
-				      size_t offset)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	dma_addr_t addr;
-
-	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
-	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
-	return addr + offset % PAGE_SIZE;
-}
-
-static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
-					size_t offset)
-{
-	struct snd_sg_buf *sgbuf = dmab->private_data;
-	unsigned int idx = offset >> PAGE_SHIFT;
-
-	if (idx >= (unsigned int)sgbuf->pages)
-		return NULL;
-	return sgbuf->page_table[idx];
-}
-
-static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
-					      unsigned int ofs,
-					      unsigned int size)
-{
-	struct snd_sg_buf *sg = dmab->private_data;
-	unsigned int start, end, pg;
-
-	start = ofs >> PAGE_SHIFT;
-	end = (ofs + size - 1) >> PAGE_SHIFT;
-	/* check page continuity */
-	pg = sg->table[start].addr >> PAGE_SHIFT;
-	for (;;) {
-		start++;
-		if (start > end)
-			break;
-		pg++;
-		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
-			return (start << PAGE_SHIFT) - ofs;
-	}
-	/* ok, all on continuous pages */
-	return size;
-}
-
-static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
-			   struct vm_area_struct *area)
-{
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
-		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-	return -ENOENT; /* continue with the default mmap handler */
-}
-
-const struct snd_malloc_ops snd_dma_sg_ops = {
-	.alloc = snd_dma_sg_alloc,
-	.free = snd_dma_sg_free,
-	.get_addr = snd_dma_sg_get_addr,
-	.get_page = snd_dma_sg_get_page,
-	.get_chunk_size = snd_dma_sg_get_chunk_size,
-	.mmap = snd_dma_sg_mmap,
-};
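
(Illustrative sketch, not part of the commit: the hand-rolled page table
removed above is superseded by the dma_alloc_noncontiguous()-based allocator
added in memalloc.c. A condensed sketch of that replacement path, using only
the DMA API calls that appear in the new snd_dma_noncontig_alloc()/_free();
the helper names are hypothetical.)

	#include <linux/dma-mapping.h>

	/* Hypothetical helpers mirroring snd_dma_noncontig_alloc()/_free() */
	static void *example_noncontig_alloc(struct device *dev, size_t size,
					     enum dma_data_direction dir,
					     struct sg_table **sgt_ret)
	{
		struct sg_table *sgt;
		void *vaddr;

		sgt = dma_alloc_noncontiguous(dev, size, dir, GFP_KERNEL, 0);
		if (!sgt)
			return NULL;
		vaddr = dma_vmap_noncontiguous(dev, size, sgt);
		if (!vaddr) {
			dma_free_noncontiguous(dev, size, sgt, dir);
			return NULL;
		}
		*sgt_ret = sgt;	/* sg_table carries the pages for the device side */
		return vaddr;	/* contiguous kernel mapping for CPU access */
	}

	static void example_noncontig_free(struct device *dev, size_t size,
					   enum dma_data_direction dir,
					   struct sg_table *sgt, void *vaddr)
	{
		dma_vunmap_noncontiguous(dev, vaddr);
		dma_free_noncontiguous(dev, size, sgt, dir);
	}
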