summary refs log tree commit diff
path: root/arch/arm/xen
diff options
context:
space:
mode:
authorStefano Stabellini <stefano.stabellini@eu.citrix.com>2014-11-21 11:08:39 +0000
committerDavid Vrabel <david.vrabel@citrix.com>2014-12-04 12:41:54 +0000
commitda095a996090a412c3b6292c7a8680661dca157d (patch)
tree02247bce77b50867c227384ed023bab81ffe77c4 /arch/arm/xen
parenta4dba130891271084344c12537731542ec77cb85 (diff)
downloadlinux-da095a996090a412c3b6292c7a8680661dca157d.tar.gz
xen/arm: introduce GNTTABOP_cache_flush
Introduce support for new hypercall GNTTABOP_cache_flush.
Use it to perform cache flushing on pages used for dma when necessary.

If GNTTABOP_cache_flush is supported by the hypervisor, we don't need to
bounce dma map operations that involve foreign grants and non-coherent
devices.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Diffstat (limited to 'arch/arm/xen')
-rw-r--r--arch/arm/xen/mm.c34
1 files changed, 32 insertions, 2 deletions
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 28ebf3ecee4e..351b24a979d4 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -12,6 +12,7 @@
 #include <linux/swiotlb.h>
 
 #include <xen/xen.h>
+#include <xen/interface/grant_table.h>
 #include <xen/interface/memory.h>
 #include <xen/swiotlb-xen.h>
 
@@ -24,12 +25,14 @@ enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
 };
+static bool hypercall_cflush = false;
 
 /* functions called by SWIOTLB */
 
 static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
 {
+	struct gnttab_cache_flush cflush;
 	unsigned long pfn;
 	size_t left = size;
 
@@ -39,7 +42,26 @@ static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
 	do {
 		size_t len = left;
 	
-		/* TODO: cache flush */
+		/* buffers in highmem or foreign pages cannot cross page
+		 * boundaries */
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		cflush.op = 0;
+		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
+		cflush.offset = offset;
+		cflush.length = len;
+
+		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
+			cflush.op = GNTTAB_CACHE_INVAL;
+		if (op == DMA_MAP) {
+			if (dir == DMA_FROM_DEVICE)
+				cflush.op = GNTTAB_CACHE_INVAL;
+			else
+				cflush.op = GNTTAB_CACHE_CLEAN;
+		}
+		if (cflush.op)
+			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
 
 		offset = 0;
 		pfn++;
@@ -104,7 +126,7 @@ bool xen_arch_need_swiotlb(struct device *dev,
 			   unsigned long pfn,
 			   unsigned long mfn)
 {
-	return ((pfn != mfn) && !is_device_dma_coherent(dev));
+	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
@@ -147,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
 
 int __init xen_mm_init(void)
 {
+	struct gnttab_cache_flush cflush;
 	if (!xen_initial_domain())
 		return 0;
 	xen_swiotlb_init(1, false);
 	xen_dma_ops = &xen_swiotlb_dma_ops;
+
+	cflush.op = 0;
+	cflush.a.dev_bus_addr = 0;
+	cflush.offset = 0;
+	cflush.length = 0;
+	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
+		hypercall_cflush = true;
 	return 0;
 }
 arch_initcall(xen_mm_init);