author     Linus Torvalds <torvalds@linux-foundation.org>  2013-02-25 16:46:44 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-25 16:46:44 -0800
commit     fffddfd6c8e0c10c42c6e2cc54ba880fcc36ebbb (patch)
tree       71bc5e597124dbaf7550f1e089d675718b3ed5c0 /drivers/char
parent     69086a78bdc973ec0b722be790b146e84ba8a8c4 (diff)
parent     be88298b0a3f771a4802f20c5e66af74bfd1dff1 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm merge from Dave Airlie:
 "Highlights:

   - TI LCD controller KMS driver

   - TI OMAP KMS driver merged from staging

   - drop gma500 stub driver

   - the fbcon locking fixes

   - the vgacon dirty like zebra fix.

   - open firmware videomode and hdmi common code helpers

   - major locking rework for kms object handling - pageflip/cursor
     won't block on polling anymore!

   - fbcon helper and prime helper cleanups

   - i915: all over the map, haswell power well enhancements, valleyview
     macro horrors cleaned up, killing lots of legacy GTT code,

   - radeon: CS ioctl unification, deprecated UMS support, gpu reset
     rework, VM fixes

   - nouveau: reworked thermal code, external dp/tmds encoder support
     (anx9805), fences sleep instead of polling,

   - exynos: all over the driver fixes."
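
One of the highlights above, the generic HDMI infoframe helpers (see "video: Add
generic HDMI infoframe helpers" in the shortlog below), lets drivers build and
pack infoframes through common code instead of open-coding the byte layout. A
minimal caller sketch, assuming the helper signatures added by that series and a
drm_display_mode pointer named mode (illustrative only, error handling trimmed):

	struct hdmi_avi_infoframe frame;
	u8 buf[17];	/* 4-byte infoframe header + 13-byte AVI payload */
	ssize_t len;

	/* Derive the AVI infoframe from the mode being set on the encoder. */
	drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);

	/* Pack header, checksum and payload into a buffer that the driver
	 * then writes to its hardware infoframe registers. */
	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return len;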

Lovely conflict in radeon/evergreen_cs.c between commit de0babd60d8d
("drm/radeon: enforce use of radeon_get_ib_value when reading user cmd")
and the new changes that modified the evergreen_dma_cs_parse()
function.
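
For context, de0babd60d8d makes the evergreen DMA parser read command-stream
dwords through the radeon_get_ib_value() accessor instead of indexing the
indirect buffer pointer directly; roughly this pattern (a sketch of the
enforced style, not the merge resolution itself):

	/* Inside a CS parser with struct radeon_cs_parser *p and index idx: */
	u32 header = radeon_get_ib_value(p, idx);	/* via the accessor */
	/* rather than the old direct dereference of the IB, e.g. ib[idx] */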

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (508 commits)
  drm/tilcdc: only build on arm
  drm/i915: Revert hdmi HDP pin checks
  drm/tegra: Add list of framebuffers to debugfs
  drm/tegra: Fix color expansion
  drm/tegra: Split DC_CMD_STATE_CONTROL register write
  drm/tegra: Implement page-flipping support
  drm/tegra: Implement VBLANK support
  drm/tegra: Implement .mode_set_base()
  drm/tegra: Add plane support
  drm/tegra: Remove bogus tegra_framebuffer structure
  drm: Add consistency check for page-flipping
  drm/radeon: Use generic HDMI infoframe helpers
  drm/tegra: Use generic HDMI infoframe helpers
  drm: Add EDID helper documentation
  drm: Add HDMI infoframe helpers
  video: Add generic HDMI infoframe helpers
  drm: Add some missing forward declarations
  drm: Move mode tables to drm_edid.c
  drm: Remove duplicate drm_mode_cea_vic()
  gma500: Fix n, m1 and m2 clock limits for sdvo and lvds
  ...
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/agp/intel-gtt.c  128
1 file changed, 73 insertions(+), 55 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..b8e2014cb9cb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
 };
 
 static struct _intel_private {
-	struct intel_gtt base;
 	const struct intel_gtt_driver *driver;
 	struct pci_dev *pcidev;	/* device one */
 	struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
 	struct resource ifp_resource;
 	int resource_valid;
 	struct page *scratch_page;
+	phys_addr_t scratch_page_dma;
 	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+	phys_addr_t gma_bus_addr;
+	/*  Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
+	/* Total number of gtt entries. */
+	unsigned int gtt_total_entries;
+	/* Part of the gtt that is mappable by the cpu, for those chips where
+	 * this is not the full gtt. */
+	unsigned int gtt_mappable_entries;
 } intel_private;
 
 #define INTEL_GTT_GEN	intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
 			return -EINVAL;
 
-		intel_private.base.scratch_page_dma = dma_addr;
+		intel_private.scratch_page_dma = dma_addr;
 	} else
-		intel_private.base.scratch_page_dma = page_to_phys(page);
+		intel_private.scratch_page_dma = page_to_phys(page);
 
 	intel_private.scratch_page = page;
 
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
 		 */
-		return intel_private.base.gtt_mappable_entries;
+		return intel_private.gtt_mappable_entries;
 	}
 }
 
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
 	set_pages_wb(intel_private.scratch_page, 1);
-	pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
+	pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
 		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 	put_page(intel_private.scratch_page);
 	__free_page(intel_private.scratch_page);
@@ -562,6 +572,40 @@ static void intel_gtt_cleanup(void)
 	intel_gtt_teardown_scratch_page();
 }
 
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_ilk_vtd_wa(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+	const unsigned short gpu_devid = intel_private.pcidev->device;
+
+	/* Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+	     intel_iommu_gfx_mapped)
+		return 1;
+#endif
+	return 0;
+}
+
+static bool intel_gtt_can_wc(void)
+{
+	if (INTEL_GTT_GEN <= 2)
+		return false;
+
+	if (INTEL_GTT_GEN >= 6)
+		return false;
+
+	/* Reports of major corruption with ILK vt'd enabled */
+	if (needs_ilk_vtd_wa())
+		return false;
+
+	return true;
+}
+
 static int intel_gtt_init(void)
 {
 	u32 gma_addr;
@@ -572,8 +616,8 @@ static int intel_gtt_init(void)
 	if (ret != 0)
 		return ret;
 
-	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
-	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+	intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
+	intel_private.gtt_total_entries = intel_gtt_total_entries();
 
 	/* save the PGETBL reg for resume */
 	intel_private.PGETBL_save =
@@ -585,13 +629,13 @@ static int intel_gtt_init(void)
 
 	dev_info(&intel_private.bridge_dev->dev,
 			"detected gtt size: %dK total, %dK mappable\n",
-			intel_private.base.gtt_total_entries * 4,
-			intel_private.base.gtt_mappable_entries * 4);
+			intel_private.gtt_total_entries * 4,
+			intel_private.gtt_mappable_entries * 4);
 
-	gtt_map_size = intel_private.base.gtt_total_entries * 4;
+	gtt_map_size = intel_private.gtt_total_entries * 4;
 
 	intel_private.gtt = NULL;
-	if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
+	if (intel_gtt_can_wc())
 		intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
 					       gtt_map_size);
 	if (intel_private.gtt == NULL)
@@ -602,13 +646,12 @@ static int intel_gtt_init(void)
 		iounmap(intel_private.registers);
 		return -ENOMEM;
 	}
-	intel_private.base.gtt = intel_private.gtt;
 
 	global_cache_flush();   /* FIXME: ? */
 
-	intel_private.base.stolen_size = intel_gtt_stolen_size();
+	intel_private.stolen_size = intel_gtt_stolen_size();
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -623,7 +666,7 @@ static int intel_gtt_init(void)
 		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
 				      &gma_addr);
 
-	intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
+	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
 	return 0;
 }
@@ -634,8 +677,7 @@ static int intel_fake_agp_fetch_size(void)
 	unsigned int aper_size;
 	int i;
 
-	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
-		    / MB(1);
+	aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
 
 	for (i = 0; i < num_sizes; i++) {
 		if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -779,7 +821,7 @@ static int intel_fake_agp_configure(void)
 	    return -EIO;
 
 	intel_private.clear_fake_agp = true;
-	agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
+	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
 	return 0;
 }
@@ -841,12 +883,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
 	int ret = -EINVAL;
 
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
-
 	if (intel_private.clear_fake_agp) {
-		int start = intel_private.base.stolen_size / PAGE_SIZE;
-		int end = intel_private.base.gtt_mappable_entries;
+		int start = intel_private.stolen_size / PAGE_SIZE;
+		int end = intel_private.gtt_mappable_entries;
 		intel_gtt_clear_range(start, end - start);
 		intel_private.clear_fake_agp = false;
 	}
@@ -857,7 +896,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		goto out;
 
-	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.gtt_total_entries)
 		goto out_err;
 
 	if (type != mem->type)
@@ -869,7 +908,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		struct sg_table st;
 
 		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -895,7 +934,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 	unsigned int i;
 
 	for (i = first_entry; i < (first_entry + num_entries); i++) {
-		intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
+		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
@@ -908,12 +947,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		return 0;
 
-	if (intel_private.base.do_idle_maps)
-		return -ENODEV;
-
 	intel_gtt_clear_range(pg_start, mem->page_count);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
@@ -1070,25 +1106,6 @@ static void i965_write_entry(dma_addr_t addr,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-/* Certain Gen5 chipsets require require idling the GPU before
- * unmapping anything from the GTT when VT-d is enabled.
- */
-static inline int needs_idle_maps(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
-	const unsigned short gpu_devid = intel_private.pcidev->device;
-
-	/* Query intel_iommu to see if we need the workaround. Presumably that
-	 * was loaded first.
-	 */
-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
-	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
-	     intel_iommu_gfx_mapped)
-		return 1;
-#endif
-	return 0;
-}
-
 static int i9xx_setup(void)
 {
 	u32 reg_addr, gtt_addr;
@@ -1116,9 +1133,6 @@ static int i9xx_setup(void)
 		break;
 	}
 
-	if (needs_idle_maps())
-		intel_private.base.do_idle_maps = 1;
-
 	intel_i9xx_setup_flush();
 
 	return 0;
@@ -1390,9 +1404,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-struct intel_gtt *intel_gtt_get(void)
+void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
+		   phys_addr_t *mappable_base, unsigned long *mappable_end)
 {
-	return &intel_private.base;
+	*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
+	*stolen_size = intel_private.stolen_size;
+	*mappable_base = intel_private.gma_bus_addr;
+	*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
 }
 EXPORT_SYMBOL(intel_gtt_get);
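
For reference, a caller-side sketch of the reworked intel_gtt_get() interface
above (hypothetical caller; in this series the actual consumer is the i915 GTT
setup code, which previously read these fields from the struct intel_gtt
returned by the old version):

	size_t gtt_total, stolen_size;
	phys_addr_t mappable_base;
	unsigned long mappable_end;

	/* Fills the four values that used to live in struct intel_gtt:
	 * total GTT size and CPU-mappable size in bytes (entries << PAGE_SHIFT),
	 * BIOS-stolen memory size in bytes, and the GMADR bus address. */
	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);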