Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  212
1 file changed, 81 insertions, 131 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac97007038..9a250eb53098 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@ struct nouveau_sgdma_be {
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
@@ -74,18 +74,6 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 	}
 }
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
 
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
 
 	nvbe->bound = true;
 	return 0;
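
The `+ 2` above replaces the removed nouveau_sgdma_pte() helper: it skips the
two 32-bit ctxdma header words that nouveau_sgdma_init() writes at the start
of the page-table object. A minimal sketch of the index math, assuming
NV_CTXDMA_PAGE_SHIFT == 12 (4 KiB GART pages); the offset value is
illustrative only, not part of the patch:

	u64      offset = 0x3000;              /* fourth 4 KiB GART page */
	unsigned pte    = (offset >> 12) + 2;  /* word index 3 + 2 == 5 */
	/* nv_wo32(gpuobj, pte * 4, addr | 3) then writes byte offset 20;
	 * the OR'd-in low bits mark the entry valid, as in the loop above. */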
@@ -142,28 +115,10 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-
-			dma_offset += NV_CTXDMA_PAGE_SIZE;
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -186,6 +141,35 @@ nouveau_sgdma_destroy(struct ttm_backend *be)
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate		= nouveau_sgdma_populate,
 	.clear			= nouveau_sgdma_clear,
@@ -194,23 +178,30 @@ static struct ttm_backend_func nouveau_sgdma_backend = {
 	.destroy		= nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func	= &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
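
Splitting the backend into two func tables moves the card-type check from
every PTE write to a single test at backend creation. A sketch of the
resulting dispatch; the caller side is assumed for illustration, since TTM
invokes these hooks internally:

	struct ttm_backend *be = nouveau_sgdma_init_ttm(dev);

	/* The TTM core later calls through the table chosen above: */
	be->func->bind(be, mem);  /* nouveau_sgdma_bind() on <NV50,
				   * nv50_sgdma_bind() on NV50 and up */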
@@ -218,7 +209,6 @@ int
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -231,68 +221,40 @@ nouveau_sgdma_init(struct drm_device *dev)
 
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
-
-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				      NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
-
-	dev_priv->gart_info.sg_dummy_page =
-		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
-	if (!dev_priv->gart_info.sg_dummy_page) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -ENOMEM;
-	}
 
-	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
-	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
-			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -EFAULT;
-	}
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					      NVOBJ_FLAG_ZERO_ALLOC |
+					      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
 
-	if (dev_priv->card_type < NV_50) {
-		/* special case, allocated from global instmem heap so
-		 * cinst is invalid, we use it on all channels though so
-		 * cinst needs to be valid, set it the same as pinst
-		 */
-		gpuobj->cinst = gpuobj->pinst;
-
-		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
-		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
-		 * on those cards? */
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
-				   (NV_DMA_ACCESS_RW  << 14) |
-				   (NV_DMA_TARGET_PCI << 16));
+				   (0 << 14) /* RW */ |
+				   (2 << 16) /* PCI */);
 		nv_wo32(gpuobj, 4, aper_size - 1);
-		for (i = 2; i < 2 + (aper_size >> 12); i++) {
-			nv_wo32(gpuobj, i * 4,
-				dev_priv->gart_info.sg_dummy_bus | 3);
-		}
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+		for (i = 2; i < 2 + (aper_size >> 12); i++)
+			nv_wo32(gpuobj, i * 4, 0x00000000);
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
 
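
For reference, word 0 of the ctxdma header composed above breaks down as
follows; the NV_CLASS_DMA_IN_MEMORY value (0x003d) is quoted from the
nouveau headers of this era and should be treated as an assumption here:

	/* Sketch of the header encoding, mirroring the inline comments:
	 *   bits  0-11 : object class (DMA in memory)
	 *   bit  12    : page table present
	 *   bit  13    : page table linear (0 = not linear)
	 *   bits 14-15 : access (0 = read/write)
	 *   bits 16-17 : target (2 = PCI)
	 * Word 1 then holds the limit, aper_size - 1. */
	uint32_t hdr = 0x003d | (1 << 12) | (0 << 13) | (0 << 14) | (2 << 16);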
@@ -301,31 +263,19 @@ nouveau_sgdma_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->gart_info.sg_dummy_page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
-			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		unlock_page(dev_priv->gart_info.sg_dummy_page);
-		__free_page(dev_priv->gart_info.sg_dummy_page);
-		dev_priv->gart_info.sg_dummy_page = NULL;
-		dev_priv->gart_info.sg_dummy_bus = 0;
-	}
-
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	int pte;
+	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 
-	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
-	if (dev_priv->card_type < NV_50) {
-		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
-		return 0;
-	}
+	BUG_ON(dev_priv->card_type >= NV_50);
 
-	NV_ERROR(dev, "Unimplemented on NV50\n");
-	return -EINVAL;
+	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+		(offset & NV_CTXDMA_PAGE_MASK);
 }