author	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-17 15:09:20 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-17 15:09:20 -0700
commit	f7ea4a4ba84f382e8eb143e435551de0feee5b4b (patch)
tree	0771d5413f1b9104816ca0ddec21c5503d562a3d
parent	5564da7e9d12ea80745f66c8d2ec7bd00f3f94eb (diff)
parent	4b40893918203ee1a1f6a114316c2a19c072e9bd (diff)
download	linux-f7ea4a4ba84f382e8eb143e435551de0feee5b4b.tar.gz
Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (44 commits)
  drm/i915: fix ioremap of a user address for non-root (CVE-2008-3831)
  drm: make CONFIG_DRM depend on CONFIG_SHMEM.
  radeon: fix PCI bus mastering support enables.
  radeon: add RS400 family support.
  drm/radeon: add support for RS740 IGP chipsets.
  i915: GM45 has GM965-style MCH setup.
  i915: Don't run retire work handler while suspended
  i915: Map status page cached for chips with GTT-based HWS location.
  i915: Fix up ring initialization to cover G45 oddities
  i915: Use non-reserved status page index for breadcrumb
  drm: Increment dev_priv->irq_received so i915_gem_interrupts count works.
  drm: kill drm_device->irq
  drm: wbinvd is cache coherent.
  i915: add missing return in error path.
  i915: fixup permissions on gem ioctls.
  drm: Clean up many sparse warnings in i915.
  drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
  drm: G33-class hardware has a newer 965-style MCH (no DCC register).
  drm: Avoid oops in GEM execbuffers with bad arguments.
  DRM: Return -EBADF on bad object in flink, and return current name if it exists.
  ...
-rw-r--r--	arch/x86/mm/highmem_32.c	1
-rw-r--r--	drivers/gpu/drm/Kconfig	3
-rw-r--r--	drivers/gpu/drm/Makefile	5
-rw-r--r--	drivers/gpu/drm/drm_agpsupport.c	52
-rw-r--r--	drivers/gpu/drm/drm_cache.c	69
-rw-r--r--	drivers/gpu/drm/drm_drv.c	6
-rw-r--r--	drivers/gpu/drm/drm_fops.c	8
-rw-r--r--	drivers/gpu/drm/drm_gem.c	421
-rw-r--r--	drivers/gpu/drm/drm_irq.c	464
-rw-r--r--	drivers/gpu/drm/drm_memory.c	2
-rw-r--r--	drivers/gpu/drm/drm_mm.c	5
-rw-r--r--	drivers/gpu/drm/drm_proc.c	135
-rw-r--r--	drivers/gpu/drm/drm_stub.c	11
-rw-r--r--	drivers/gpu/drm/drm_sysfs.c	2
-rw-r--r--	drivers/gpu/drm/i915/Makefile	7
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	332
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	476
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1180
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	2558
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debug.c	201
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_proc.c	292
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_tiling.c	257
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	514
-rw-r--r--	drivers/gpu/drm/i915/i915_opregion.c	371
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	1417
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c	509
-rw-r--r--	drivers/gpu/drm/mga/mga_drv.c	29
-rw-r--r--	drivers/gpu/drm/mga/mga_drv.h	6
-rw-r--r--	drivers/gpu/drm/mga/mga_irq.c	74
-rw-r--r--	drivers/gpu/drm/mga/mga_state.c	2
-rw-r--r--	drivers/gpu/drm/r128/r128_drv.c	29
-rw-r--r--	drivers/gpu/drm/r128/r128_drv.h	11
-rw-r--r--	drivers/gpu/drm/r128/r128_irq.c	55
-rw-r--r--	drivers/gpu/drm/r128/r128_state.c	2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cp.c	53
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.c	32
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.h	57
-rw-r--r--	drivers/gpu/drm/radeon/radeon_irq.c	268
-rw-r--r--	drivers/gpu/drm/radeon/radeon_state.c	2
-rw-r--r--	drivers/gpu/drm/sis/sis_mm.c	10
-rw-r--r--	drivers/gpu/drm/via/via_drv.c	26
-rw-r--r--	drivers/gpu/drm/via/via_drv.h	16
-rw-r--r--	drivers/gpu/drm/via/via_irq.c	105
-rw-r--r--	drivers/gpu/drm/via/via_mm.c	3
-rw-r--r--	include/drm/drm.h	63
-rw-r--r--	include/drm/drmP.h	249
-rw-r--r--	include/drm/drm_pciids.h	54
-rw-r--r--	include/drm/i915_drm.h	333
-rw-r--r--	mm/shmem.c	1
49 files changed, 8814 insertions, 1964 deletions
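
This merge brings in the GEM memory manager and a reworked per-CRTC vblank layer. The three new GEM ioctls wired up in drm_drv.c below (DRM_IOCTL_GEM_FLINK, DRM_IOCTL_GEM_OPEN, DRM_IOCTL_GEM_CLOSE) let userspace share buffer objects by global name. A minimal userspace sketch of that flow, assuming both file descriptors are authenticated and handle_a came from a driver-specific creation ioctl; error handling is abbreviated:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* struct drm_gem_flink, drm_gem_open, drm_gem_close */

/* Share a GEM object between two opens of the DRM device (sketch). */
static int share_object(int fd_a, int fd_b, uint32_t handle_a)
{
	struct drm_gem_flink flink = { .handle = handle_a };
	struct drm_gem_open open_arg = { 0 };
	struct drm_gem_close close_arg = { 0 };

	/* Name the object globally; the name does not hold a reference. */
	if (ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink))
		return -1;

	/* Open it from the second fd, getting a fresh handle plus the size. */
	open_arg.name = flink.name;
	if (ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg))
		return -1;
	printf("name %u -> handle %u, size %llu\n", flink.name,
	       open_arg.handle, (unsigned long long)open_arg.size);

	/* The opened handle holds a reference; drop it when done. */
	close_arg.handle = open_arg.handle;
	return ioctl(fd_b, DRM_IOCTL_GEM_CLOSE, &close_arg);
}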
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 165c871ba9af..bcc079c282dd 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
 	return (void*) vaddr;
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
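
The export above exists so that i915's GEM pwrite path can map individual aperture pages through the CPU until vmap support lands. Roughly how such a consumer looks (a hedged sketch, not the exact i915 code; page_base, page_offset, user_data and length are assumed driver-side variables):

	/* Map one GTT page by pfn, copy user data in, unmap (sketch). */
	unsigned long pfn = (dev->agp->base + page_base) >> PAGE_SHIFT;
	char *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	unsigned long unwritten =
		__copy_from_user_inatomic_nocache(vaddr + page_offset,
						  user_data, length);
	kunmap_atomic(vaddr, KM_USER0);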
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 610d6fd5bb50..9097500de5f4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
 #
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
+	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -87,6 +87,7 @@ config DRM_MGA
 config DRM_SIS
 	tristate "SiS video cards"
 	depends on DRM && AGP
+	depends on FB_SIS || FB_SIS=n
 	help
 	  Choose this option if you have a SiS 630 or compatible video
           chipset. If M is selected the module will be called sis. AGP
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index e9f9a97ae00a..74da99495e21 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,8 +4,9 @@
 
 ccflags-y := -Iinclude/drm
 
-drm-y       :=	drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+drm-y       :=	drm_auth.o drm_bufs.o drm_cache.o \
+		drm_context.o drm_dma.o drm_drawable.o \
+		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index aefa5ac4c0b1..3d33b8252b58 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,6 +33,7 @@
 
 #include "drmP.h"
 #include <linux/module.h>
+#include <asm/agp.h>
 
 #if __OS_HAS_AGP
 
@@ -452,4 +453,53 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
 	return agp_unbind_memory(handle);
 }
 
-#endif				/* __OS_HAS_AGP */
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+		   struct page **pages,
+		   unsigned long num_pages,
+		   uint32_t gtt_offset,
+		   u32 type)
+{
+	DRM_AGP_MEM *mem;
+	int ret, i;
+
+	DRM_DEBUG("\n");
+
+	mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+				      type);
+	if (mem == NULL) {
+		DRM_ERROR("Failed to allocate memory for %ld pages\n",
+			  num_pages);
+		return NULL;
+	}
+
+	for (i = 0; i < num_pages; i++)
+		mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+	mem->page_count = num_pages;
+
+	mem->is_flushed = true;
+	ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+	if (ret != 0) {
+		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+		agp_free_memory(mem);
+		return NULL;
+	}
+
+	return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+	agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+
+#endif /* __OS_HAS_AGP */
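
drm_agp_bind_pages() gives GEM drivers a way to put an arbitrary page array into the aperture without a drm map entry. A hedged usage sketch (the AGP_USER_MEMORY type and the surrounding function are illustrative, not from this patch):

static int example_bind_to_gtt(struct drm_device *dev, struct page **pages,
			       unsigned long num_pages, uint32_t gtt_offset)
{
	DRM_AGP_MEM *mem;

	/* Caller keeps its own references on the pages while bound. */
	mem = drm_agp_bind_pages(dev, pages, num_pages, gtt_offset,
				 AGP_USER_MEMORY);
	if (mem == NULL)
		return -ENOMEM;

	/* ... the GPU may now access the range at gtt_offset ... */

	drm_agp_unbind_memory(mem);	/* later, at unbind time */
	drm_agp_free_memory(mem);
	return 0;
}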
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
new file mode 100644
index 000000000000..0e994a0e46d4
--- /dev/null
+++ b/drivers/gpu/drm/drm_cache.c
@@ -0,0 +1,69 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86)
+static void
+drm_clflush_page(struct page *page)
+{
+	uint8_t *page_virtual;
+	unsigned int i;
+
+	if (unlikely(page == NULL))
+		return;
+
+	page_virtual = kmap_atomic(page, KM_USER0);
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		clflush(page_virtual + i);
+	kunmap_atomic(page_virtual, KM_USER0);
+}
+#endif
+
+void
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		unsigned long i;
+
+		mb();
+		for (i = 0; i < num_pages; ++i)
+			drm_clflush_page(*pages++);
+		mb();
+
+		return;
+	}
+
+	wbinvd();
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
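
drm_clflush_pages() is what drivers call to push CPU writes out of the cache before the GPU reads object memory through a non-coherent mapping, e.g. (sketch, assuming obj_priv->pages is the object's backing page array):

	/* Flush every cacheline of the object before GPU use, then the
	 * chipset write buffers. */
	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
	drm_agp_chipset_flush(dev);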
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 452c2d866ec5..96f416afc3f6 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -116,7 +116,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
 
+	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 851a53f1acce..0d46627663b1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -246,7 +246,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	memset(priv, 0, sizeof(*priv));
 	filp->private_data = priv;
 	priv->filp = filp;
-	priv->uid = current->euid;
+	priv->uid = current_euid();
 	priv->pid = task_pid_nr(current);
 	priv->minor = idr_find(&drm_minors_idr, minor_id);
 	priv->ioctl_count = 0;
@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	INIT_LIST_HEAD(&priv->lhead);
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_open(dev, priv);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
 		dev->driver->reclaim_buffers(dev, file_priv);
 	}
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
 	drm_fasync(-1, filp, 0);
 
 	mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
new file mode 100644
index 000000000000..ccd1afdede02
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+	spin_lock_init(&dev->object_name_lock);
+	idr_init(&dev->object_name_idr);
+	atomic_set(&dev->object_count, 0);
+	atomic_set(&dev->object_memory, 0);
+	atomic_set(&dev->pin_count, 0);
+	atomic_set(&dev->pin_memory, 0);
+	atomic_set(&dev->gtt_count, 0);
+	atomic_set(&dev->gtt_memory, 0);
+	return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_object *obj;
+
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+
+	obj->dev = dev;
+	obj->filp = shmem_file_setup("drm mm object", size, 0);
+	if (IS_ERR(obj->filp)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	kref_init(&obj->refcount);
+	kref_init(&obj->handlecount);
+	obj->size = size;
+	if (dev->driver->gem_init_object != NULL &&
+	    dev->driver->gem_init_object(obj) != 0) {
+		fput(obj->filp);
+		kfree(obj);
+		return NULL;
+	}
+	atomic_inc(&dev->object_count);
+	atomic_add(obj->size, &dev->object_memory);
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+	struct drm_device *dev;
+	struct drm_gem_object *obj;
+
+	/* This is gross. The idr system doesn't let us try a delete and
+	 * return an error code.  It just spews if you fail at deleting.
+	 * So, we have to grab a lock around finding the object and then
+	 * doing the delete on it and dropping the refcount, or the user
+	 * could race us to double-decrement the refcount and cause a
+	 * use-after-free later.  Given the frequency of our handle lookups,
+	 * we may want to use ida for number allocation and a hash table
+	 * for the pointers, anyway.
+	 */
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return -EINVAL;
+	}
+	dev = obj->dev;
+
+	/* Release reference and decrement refcount. */
+	idr_remove(&filp->object_idr, handle);
+	spin_unlock(&filp->table_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		       struct drm_gem_object *obj,
+		       int *handlep)
+{
+	int	ret;
+
+	/*
+	 * Get the user-visible handle using idr.
+	 */
+again:
+	/* ensure there is space available to allocate a handle */
+	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	/* do the allocation under our spinlock */
+	spin_lock(&file_priv->table_lock);
+	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+	spin_unlock(&file_priv->table_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0)
+		return ret;
+
+	drm_gem_object_handle_reference(obj);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+		      int handle)
+{
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return NULL;
+	}
+
+	drm_gem_object_reference(obj);
+
+	spin_unlock(&filp->table_lock);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_close *args = data;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	ret = drm_gem_handle_delete(file_priv, args->handle);
+
+	return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_flink *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+again:
+	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		args->name = obj->name;
+		spin_unlock(&dev->object_name_lock);
+		return 0;
+	}
+	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+				 &obj->name);
+	spin_unlock(&dev->object_name_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0) {
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/*
+	 * Leave the reference from the lookup around as the
+	 * name table now holds one
+	 */
+	args->name = (uint64_t) obj->name;
+
+	return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_gem_open *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+	int handle;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	spin_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, (int) args->name);
+	if (obj)
+		drm_gem_object_reference(obj);
+	spin_unlock(&dev->object_name_lock);
+	if (!obj)
+		return -ENOENT;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	args->size = obj->size;
+
+	return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_init(&file_private->object_idr);
+	spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+
+	drm_gem_object_handle_unreference(obj);
+
+	return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_for_each(&file_private->object_idr,
+		     &drm_gem_object_release_handle, NULL);
+
+	idr_destroy(&file_private->object_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (dev->driver->gem_free_object != NULL)
+		dev->driver->gem_free_object(obj);
+
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = container_of(kref,
+						  struct drm_gem_object,
+						  handlecount);
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		spin_unlock(&dev->object_name_lock);
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 */
+		drm_gem_object_unreference(obj);
+	} else
+		spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
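
With these pieces, a GEM driver's object-creation ioctl reduces to a few calls into this file. A sketch of the pattern (hypothetical function; i915's equivalent lives in i915_gem.c):

static int example_gem_create(struct drm_device *dev, uint64_t size,
			      struct drm_file *file_priv, int *handle_p)
{
	struct drm_gem_object *obj;
	int ret;

	/* drm_gem_object_alloc() insists on page-aligned sizes. */
	size = roundup(size, PAGE_SIZE);

	obj = drm_gem_object_alloc(dev, size);	/* shmfs-backed object */
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, handle_p);

	/* Drop the allocation's initial references; the new handle now
	 * keeps the object alive on its own. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}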
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53f0e5af1cc8..4091b9e291f9 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
 		return -EINVAL;
 
-	p->irq = dev->irq;
+	p->irq = dev->pdev->irq;
 
 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
 		  p->irq);
@@ -71,25 +71,137 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void vblank_disable_fn(unsigned long arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	unsigned long irqflags;
+	int i;
+
+	if (!dev->vblank_disable_allowed)
+		return;
+
+	for (i = 0; i < dev->num_crtcs; i++) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+		    dev->vblank_enabled[i]) {
+			DRM_DEBUG("disabling vblank on crtc %d\n", i);
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+			dev->driver->disable_vblank(dev, i);
+			dev->vblank_enabled[i] = 0;
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+}
+
+static void drm_vblank_cleanup(struct drm_device *dev)
+{
+	/* Bail if the driver didn't call drm_vblank_init() */
+	if (dev->num_crtcs == 0)
+		return;
+
+	del_timer(&dev->vblank_disable_timer);
+
+	vblank_disable_fn((unsigned long)dev);
+
+	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+
+	dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+	int i, ret = -ENOMEM;
+
+	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+		    (unsigned long)dev);
+	spin_lock_init(&dev->vbl_lock);
+	atomic_set(&dev->vbl_signal_pending, 0);
+	dev->num_crtcs = num_crtcs;
+
+	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+				   DRM_MEM_DRIVER);
+	if (!dev->vbl_queue)
+		goto err;
+
+	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
+				  DRM_MEM_DRIVER);
+	if (!dev->vbl_sigs)
+		goto err;
+
+	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+				      DRM_MEM_DRIVER);
+	if (!dev->_vblank_count)
+		goto err;
+
+	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_refcount)
+		goto err;
+
+	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_enabled)
+		goto err;
+
+	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+	if (!dev->last_vblank)
+		goto err;
+
+	dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_inmodeset)
+		goto err;
+
+	/* Zero per-crtc vblank stuff */
+	for (i = 0; i < num_crtcs; i++) {
+		init_waitqueue_head(&dev->vbl_queue[i]);
+		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
+		atomic_set(&dev->_vblank_count[i], 0);
+		atomic_set(&dev->vblank_refcount[i], 0);
+	}
+
+	dev->vblank_disable_allowed = 0;
+
+	return 0;
+
+err:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
 /**
  * Install IRQ handler.
  *
  * \param dev DRM device.
- * \param irq IRQ number.
  *
- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
+ * Initializes the IRQ related data. Installs the handler, calling the driver
  * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
  * before and after the installation.
  */
-static int drm_irq_install(struct drm_device * dev)
+int drm_irq_install(struct drm_device *dev)
 {
-	int ret;
+	int ret = 0;
 	unsigned long sh_flags = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	if (dev->irq == 0)
+	if (dev->pdev->irq == 0)
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -107,18 +219,7 @@ static int drm_irq_install(struct drm_device * dev)
 	dev->irq_enabled = 1;
 	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
-
-	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
-		init_waitqueue_head(&dev->vbl_queue);
-
-		spin_lock_init(&dev->vbl_lock);
-
-		INIT_LIST_HEAD(&dev->vbl_sigs);
-		INIT_LIST_HEAD(&dev->vbl_sigs2);
-
-		dev->vbl_pending = 0;
-	}
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	/* Before installing handler */
 	dev->driver->irq_preinstall(dev);
@@ -127,8 +228,9 @@ static int drm_irq_install(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
 		sh_flags = IRQF_SHARED;
 
-	ret = request_irq(dev->irq, dev->driver->irq_handler,
+	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
 			  sh_flags, dev->devname, dev);
+
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
@@ -137,10 +239,16 @@ static int drm_irq_install(struct drm_device * dev)
 	}
 
 	/* After installing handler */
-	dev->driver->irq_postinstall(dev);
+	ret = dev->driver->irq_postinstall(dev);
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	return 0;
+	return ret;
 }
+EXPORT_SYMBOL(drm_irq_install);
 
 /**
  * Uninstall the IRQ handler.
@@ -164,17 +272,18 @@ int drm_irq_uninstall(struct drm_device * dev)
 	if (!irq_enabled)
 		return -EINVAL;
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	dev->driver->irq_uninstall(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(dev->pdev->irq, dev);
+
+	drm_vblank_cleanup(dev);
 
 	dev->locked_tasklet_func = NULL;
 
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_irq_uninstall);
 
 /**
@@ -201,7 +310,7 @@ int drm_control(struct drm_device *dev, void *data,
 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 			return 0;
 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
+		    ctl->irq != dev->pdev->irq)
 			return -EINVAL;
 		return drm_irq_install(dev);
 	case DRM_UNINST_HANDLER:
@@ -214,6 +323,174 @@ int drm_control(struct drm_device *dev, void *data,
 }
 
 /**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+	return atomic_read(&dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+	u32 cur_vblank, diff;
+
+	/*
+	 * Interrupts were disabled prior to this call, so deal with counter
+	 * wrap if needed.
+	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
+	 * here if the register is small or we had vblank interrupts off for
+	 * a long time.
+	 */
+	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	diff = cur_vblank - dev->last_vblank[crtc];
+	if (cur_vblank < dev->last_vblank[crtc]) {
+		diff += dev->max_vblank_count;
+
+		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+	}
+
+	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+		  crtc, diff);
+
+	atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/* Going from 0->1 means we have to enable interrupts again */
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+	    !dev->vblank_enabled[crtc]) {
+		ret = dev->driver->enable_vblank(dev, crtc);
+		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+		if (ret)
+			atomic_dec(&dev->vblank_refcount[crtc]);
+		else {
+			dev->vblank_enabled[crtc] = 1;
+			drm_update_vblank_count(dev, crtc);
+		}
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+	/* Last user schedules interrupt disable */
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_modeset_ctl *modeset = data;
+	unsigned long irqflags;
+	int crtc, ret = 0;
+
+	/* If drm_vblank_init() hasn't been called yet, just no-op */
+	if (!dev->num_crtcs)
+		goto out;
+
+	crtc = modeset->crtc;
+	if (crtc >= dev->num_crtcs) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * To avoid all the problems that might happen if interrupts
+	 * were enabled/disabled around or between these calls, we just
+	 * have the kernel take a reference on the CRTC (just once though
+	 * to avoid corrupting the count if multiple, mismatch calls occur),
+	 * so that interrupts remain enabled in the interim.
+	 */
+	switch (modeset->cmd) {
+	case _DRM_PRE_MODESET:
+		if (!dev->vblank_inmodeset[crtc]) {
+			dev->vblank_inmodeset[crtc] = 1;
+			drm_vblank_get(dev, crtc);
+		}
+		break;
+	case _DRM_POST_MODESET:
+		if (dev->vblank_inmodeset[crtc]) {
+			spin_lock_irqsave(&dev->vbl_lock, irqflags);
+			dev->vblank_disable_allowed = 1;
+			dev->vblank_inmodeset[crtc] = 0;
+			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+			drm_vblank_put(dev, crtc);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	return ret;
+}
+
+/**
  * Wait for VBLANK.
  *
  * \param inode device inode.
@@ -232,14 +509,14 @@ int drm_control(struct drm_device *dev, void *data,
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_wait_vblank(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
 {
 	union drm_wait_vblank *vblwait = data;
-	struct timeval now;
 	int ret = 0;
-	unsigned int flags, seq;
+	unsigned int flags, seq, crtc;
 
-	if ((!dev->irq) || (!dev->irq_enabled))
+	if ((!dev->pdev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
 
 	if (vblwait->request.type &
@@ -251,13 +528,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
 
-	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
-				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+	if (crtc >= dev->num_crtcs)
 		return -EINVAL;
 
-	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
-			  : &dev->vbl_received);
+	ret = drm_vblank_get(dev, crtc);
+	if (ret) {
+		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
+		return ret;
+	}
+	seq = drm_vblank_count(dev, crtc);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -266,7 +547,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	case _DRM_VBLANK_ABSOLUTE:
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto done;
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -276,8 +558,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 		unsigned long irqflags;
-		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
-				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
 		struct drm_vbl_sig *vbl_sig;
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +579,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 			}
 		}
 
-		if (dev->vbl_pending >= 100) {
+		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
 			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto done;
 		}
 
-		dev->vbl_pending++;
-
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-		if (!
-		    (vbl_sig =
-		     drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
-			return -ENOMEM;
+		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
+				     DRM_MEM_DRIVER);
+		if (!vbl_sig) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		ret = drm_vblank_get(dev, crtc);
+		if (ret) {
+			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
+				 DRM_MEM_DRIVER);
+			return ret;
 		}
 
-		memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+		atomic_inc(&dev->vbl_signal_pending);
 
 		vbl_sig->sequence = vblwait->request.sequence;
 		vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,20 +615,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 		vblwait->reply.sequence = seq;
 	} else {
-		if (flags & _DRM_VBLANK_SECONDARY) {
-			if (dev->driver->vblank_wait2)
-				ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
-		} else if (dev->driver->vblank_wait)
-			ret =
-			    dev->driver->vblank_wait(dev,
-						     &vblwait->request.sequence);
-
-		do_gettimeofday(&now);
-		vblwait->reply.tval_sec = now.tv_sec;
-		vblwait->reply.tval_usec = now.tv_usec;
+		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+			  vblwait->request.sequence, crtc);
+		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+			    ((drm_vblank_count(dev, crtc)
+			      - vblwait->request.sequence) <= (1 << 23)));
+
+		if (ret != -EINTR) {
+			struct timeval now;
+
+			do_gettimeofday(&now);
+
+			vblwait->reply.tval_sec = now.tv_sec;
+			vblwait->reply.tval_usec = now.tv_usec;
+			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+			DRM_DEBUG("returning %d to client\n",
+				  vblwait->reply.sequence);
+		} else {
+			DRM_DEBUG("vblank wait interrupted by signal\n");
+		}
 	}
 
-      done:
+done:
+	drm_vblank_put(dev, crtc);
 	return ret;
 }
 
@@ -348,44 +645,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
  * Send the VBLANK signals.
  *
  * \param dev DRM device.
+ * \param crtc CRTC where the vblank event occurred
  *
  * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-void drm_vbl_send_signals(struct drm_device * dev)
+static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
 {
+	struct drm_vbl_sig *vbl_sig, *tmp;
+	struct list_head *vbl_sigs;
+	unsigned int vbl_seq;
 	unsigned long flags;
-	int i;
 
 	spin_lock_irqsave(&dev->vbl_lock, flags);
 
-	for (i = 0; i < 2; i++) {
-		struct drm_vbl_sig *vbl_sig, *tmp;
-		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
-						   &dev->vbl_received);
+	vbl_sigs = &dev->vbl_sigs[crtc];
+	vbl_seq = drm_vblank_count(dev, crtc);
 
-		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-				vbl_sig->info.si_code = vbl_seq;
-				send_sig_info(vbl_sig->info.si_signo,
-					      &vbl_sig->info, vbl_sig->task);
+	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+	    if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+		vbl_sig->info.si_code = vbl_seq;
+		send_sig_info(vbl_sig->info.si_signo,
+			      &vbl_sig->info, vbl_sig->task);
 
-				list_del(&vbl_sig->head);
-
-				drm_free(vbl_sig, sizeof(*vbl_sig),
-					 DRM_MEM_DRIVER);
+		list_del(&vbl_sig->head);
 
-				dev->vbl_pending--;
-			}
-		}
+		drm_free(vbl_sig, sizeof(*vbl_sig),
+			 DRM_MEM_DRIVER);
+		atomic_dec(&dev->vbl_signal_pending);
+		drm_vblank_put(dev, crtc);
+	    }
 	}
 
 	spin_unlock_irqrestore(&dev->vbl_lock, flags);
 }
 
-EXPORT_SYMBOL(drm_vbl_send_signals);
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+	atomic_inc(&dev->_vblank_count[crtc]);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_vbl_send_signals(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
 
 /**
  * Tasklet wrapper function.
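
The per-CRTC rework above replaces DRIVER_IRQ_VBL/DRIVER_IRQ_VBL2 and the vblank_wait hooks: a driver now calls drm_vblank_init() at load time, supplies get_vblank_counter/enable_vblank/disable_vblank callbacks, and reports each event from its interrupt handler. A schematic sketch for a hypothetical two-CRTC driver (the status bits and read-and-ack helper are illustrative):

	/* Load-time setup: allocate per-CRTC vblank bookkeeping. */
	ret = drm_vblank_init(dev, 2);
	if (ret)
		return ret;

irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	u32 status = example_read_and_ack_irq(dev);	/* hypothetical */

	/* Bump the cooked counter and wake waiters for each CRTC. */
	if (status & EXAMPLE_VBLANK_CRTC0)
		drm_handle_vblank(dev, 0);
	if (status & EXAMPLE_VBLANK_CRTC1)
		drm_handle_vblank(dev, 1);

	return status ? IRQ_HANDLED : IRQ_NONE;
}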
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 0177012845c6..803bc9e7ce3c 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 
 #else  /*  __OS_HAS_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index dcff9e9b52e3..217ad7dc7076 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 
 	return child;
 }
+EXPORT_SYMBOL(drm_mm_get_block);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
 	}
 }
+EXPORT_SYMBOL(drm_mm_put_block);
 
 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 				  unsigned long size,
@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
 
 	return (head->next->next == head);
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 
 	return drm_mm_create_tail_node(mm, start, size);
 }
-
+EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index 93b1e0475c93..d490db4c0de0 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
 			   int request, int *eof, void *data);
 static int drm_bufs_info(char *buf, char **start, off_t offset,
 			 int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data);
 #if DRM_DEBUG_CODE
 static int drm_vma_info(char *buf, char **start, off_t offset,
 			int request, int *eof, void *data);
@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
 static struct drm_proc_list {
 	const char *name;	/**< file name */
 	int (*f) (char *, char **, off_t, int, int *, void *);		/**< proc callback*/
+	u32 driver_features; /**< Required driver features for this entry */
 } drm_proc_list[] = {
-	{"name", drm_name_info},
-	{"mem", drm_mem_info},
-	{"vm", drm_vm_info},
-	{"clients", drm_clients_info},
-	{"queues", drm_queues_info},
-	{"bufs", drm_bufs_info},
+	{"name", drm_name_info, 0},
+	{"mem", drm_mem_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"queues", drm_queues_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info},
 #endif
@@ -90,8 +97,9 @@ static struct drm_proc_list {
 int drm_proc_init(struct drm_minor *minor, int minor_id,
 		  struct proc_dir_entry *root)
 {
+	struct drm_device *dev = minor->dev;
 	struct proc_dir_entry *ent;
-	int i, j;
+	int i, j, ret;
 	char name[64];
 
 	sprintf(name, "%d", minor_id);
@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
 	}
 
 	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+		u32 features = drm_proc_list[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
 		ent = create_proc_entry(drm_proc_list[i].name,
 					S_IFREG | S_IRUGO, minor->dev_root);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
 				  name, drm_proc_list[i].name);
-			for (j = 0; j < i; j++)
-				remove_proc_entry(drm_proc_list[i].name,
-						  minor->dev_root);
-			remove_proc_entry(name, root);
-			minor->dev_root = NULL;
-			return -1;
+			ret = -1;
+			goto fail;
 		}
 		ent->read_proc = drm_proc_list[i].f;
 		ent->data = minor;
 	}
 
+	if (dev->driver->proc_init) {
+		ret = dev->driver->proc_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/proc/dri.\n");
+			goto fail;
+		}
+	}
+
 	return 0;
+ fail:
+
+	for (j = 0; j < i; j++)
+		remove_proc_entry(drm_proc_list[i].name,
+				  minor->dev_root);
+	remove_proc_entry(name, root);
+	minor->dev_root = NULL;
+	return ret;
 }
 
 /**
@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
  */
 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
+	struct drm_device *dev = minor->dev;
 	int i;
 	char name[64];
 
 	if (!root || !minor->dev_root)
 		return 0;
 
+	if (dev->driver->proc_cleanup)
+		dev->driver->proc_cleanup(minor);
+
 	for (i = 0; i < DRM_PROC_ENTRIES; i++)
 		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
 	sprintf(name, "%d", minor->index);
@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
 	return ret;
 }
 
+struct drm_gem_name_info_data {
+       int                     len;
+       char                    *buf;
+       int                     eof;
+};
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct drm_gem_name_info_data   *nid = data;
+
+	DRM_INFO("name %d size %d\n", obj->name, obj->size);
+	if (nid->eof)
+		return 0;
+
+	nid->len += sprintf(&nid->buf[nid->len],
+			    "%6d%9d%8d%9d\n",
+			    obj->name, obj->size,
+			    atomic_read(&obj->handlecount.refcount),
+			    atomic_read(&obj->refcount.refcount));
+	if (nid->len > DRM_PROC_LIMIT) {
+		nid->eof = 1;
+		return 0;
+	}
+	return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	struct drm_gem_name_info_data nid;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	nid.len = sprintf(buf, "  name     size handles refcount\n");
+	nid.buf = buf;
+	nid.eof = 0;
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+	*start = &buf[offset];
+	*eof = 0;
+	if (nid.len > request + offset)
+		return request;
+	*eof = 1;
+	return nid.len - offset;
+}
+
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
+	DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
+	DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
+	DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
+	DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+	DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
 #if DRM_DEBUG_CODE
 
 static int drm__vma_info(char *buf, char **start, off_t offset, int request,
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c2f584f3b46c..141e33004a76 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -107,7 +107,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 #ifdef __alpha__
 	dev->hose = pdev->sysdata;
 #endif
-	dev->irq = pdev->irq;
 
 	if (drm_ht_create(&dev->map_hash, 12)) {
 		return -ENOMEM;
@@ -152,6 +151,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 		goto error_out_unreg;
 	}
 
+	if (driver->driver_features & DRIVER_GEM) {
+		retcode = drm_gem_init(dev);
+		if (retcode) {
+			DRM_ERROR("Cannot initialize graphics execution "
+				  "manager (GEM)\n");
+			goto error_out_unreg;
+		}
+	}
+
 	return 0;
 
       error_out_unreg:
@@ -317,6 +325,7 @@ int drm_put_dev(struct drm_device * dev)
 int drm_put_minor(struct drm_minor **minor_p)
 {
 	struct drm_minor *minor = *minor_p;
+
 	DRM_DEBUG("release secondary minor %d\n", minor->index);
 
 	if (minor->type == DRM_MINOR_LEGACY)
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index af211a0ef179..1611b9bcbe7f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
 err_out_files:
 	if (i > 0)
 		for (j = 0; j < i; j++)
-			device_remove_file(&minor->kdev, &device_attrs[i]);
+			device_remove_file(&minor->kdev, &device_attrs[j]);
 	device_unregister(&minor->kdev);
 err_out:
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a9e60464df74..5ba78e4fd2b5 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -3,7 +3,12 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
+          i915_suspend.o \
+	  i915_gem.o \
+	  i915_gem_debug.o \
+	  i915_gem_proc.o \
+	  i915_gem_tiling.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9ac4720e647b..db34780edbb2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,40 +40,96 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+	u32 last_acthd = I915_READ(acthd_reg);
+	u32 acthd;
+	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 	int i;
 
-	for (i = 0; i < 10000; i++) {
-		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	for (i = 0; i < 100000; i++) {
+		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+		acthd = I915_READ(acthd_reg);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->Size;
 		if (ring->space >= n)
 			return 0;
 
-		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		if (dev_priv->sarea_priv)
+			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
 		if (ring->head != last_head)
 			i = 0;
+		if (acthd != last_acthd)
+			i = 0;
 
 		last_head = ring->head;
+		last_acthd = acthd;
+		msleep_interruptible(10);
+
 	}
 
 	return -EBUSY;
 }
 
+/**
+ * Sets up the hardware status page for devices that need a physical address
+ * in the register.
+ */
+static int i915_init_phys_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	/* Program Hardware Status Page */
+	dev_priv->status_page_dmah =
+		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+	if (!dev_priv->status_page_dmah) {
+		DRM_ERROR("Can not allocate hardware status page\n");
+		return -ENOMEM;
+	}
+	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+	return 0;
+}
+
+/**
+ * Frees the hardware status page, whether it's a physical address or a virtual
+ * address set up by the X Server.
+ */
+static void i915_free_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+	}
+
+	if (dev_priv->status_gfx_addr) {
+		dev_priv->status_gfx_addr = 0;
+		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+	}
+
+	/* Need to rewrite hardware status page */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 
-	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
 		ring->space += ring->Size;
 
-	if (ring->head == ring->tail)
+	if (ring->head == ring->tail && dev_priv->sarea_priv)
 		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
@@ -84,28 +140,19 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
 	 */
-	if (dev->irq)
+	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
 	if (dev_priv->ring.virtual_start) {
 		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = 0;
-		dev_priv->ring.map.handle = 0;
+		dev_priv->ring.virtual_start = NULL;
+		dev_priv->ring.map.handle = NULL;
 		dev_priv->ring.map.size = 0;
 	}
 
-	if (dev_priv->status_page_dmah) {
-		drm_pci_free(dev, dev_priv->status_page_dmah);
-		dev_priv->status_page_dmah = NULL;
-		/* Need to rewrite hardware status page */
-		I915_WRITE(0x02080, 0x1ffff000);
-	}
-
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
-		drm_core_ioremapfree(&dev_priv->hws_map, dev);
-		I915_WRITE(0x2080, 0x1ffff000);
-	}
+	/* Clear the HWS virtual address at teardown */
+	if (I915_NEED_GFX_HWS(dev))
+		i915_free_hws(dev);
 
 	return 0;
 }
@@ -121,34 +168,34 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		return -EINVAL;
 	}
 
-	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
-	if (!dev_priv->mmio_map) {
-		i915_dma_cleanup(dev);
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
-	}
-
 	dev_priv->sarea_priv = (drm_i915_sarea_t *)
 	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
 
-	dev_priv->ring.Start = init->ring_start;
-	dev_priv->ring.End = init->ring_end;
-	dev_priv->ring.Size = init->ring_size;
-	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+	if (init->ring_size != 0) {
+		if (dev_priv->ring.ring_obj != NULL) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("Client tried to initialize ringbuffer in "
+				  "GEM mode\n");
+			return -EINVAL;
+		}
 
-	dev_priv->ring.map.offset = init->ring_start;
-	dev_priv->ring.map.size = init->ring_size;
-	dev_priv->ring.map.type = 0;
-	dev_priv->ring.map.flags = 0;
-	dev_priv->ring.map.mtrr = 0;
+		dev_priv->ring.Size = init->ring_size;
+		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+		dev_priv->ring.map.offset = init->ring_start;
+		dev_priv->ring.map.size = init->ring_size;
+		dev_priv->ring.map.type = 0;
+		dev_priv->ring.map.flags = 0;
+		dev_priv->ring.map.mtrr = 0;
 
-	if (dev_priv->ring.map.handle == NULL) {
-		i915_dma_cleanup(dev);
-		DRM_ERROR("can not ioremap virtual address for"
-			  " ring buffer\n");
-		return -ENOMEM;
+		drm_core_ioremap(&dev_priv->ring.map, dev);
+
+		if (dev_priv->ring.map.handle == NULL) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("can not ioremap virtual address for"
+				  " ring buffer\n");
+			return -ENOMEM;
+		}
 	}
 
 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -159,34 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->current_page = 0;
 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 
-	/* We are using separate values as placeholders for mechanisms for
-	 * private backbuffer/depthbuffer usage.
-	 */
-	dev_priv->use_mi_batchbuffer_start = 0;
-	if (IS_I965G(dev)) /* 965 doesn't support older method */
-		dev_priv->use_mi_batchbuffer_start = 1;
-
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
 	dev_priv->allow_batchbuffer = 1;
 
-	/* Program Hardware Status Page */
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-		if (!dev_priv->status_page_dmah) {
-			i915_dma_cleanup(dev);
-			DRM_ERROR("Can not allocate hardware status page\n");
-			return -ENOMEM;
-		}
-		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
-		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-		I915_WRITE(0x02080, dev_priv->dma_status_page);
-	}
-	DRM_DEBUG("Enabled hardware status page\n");
 	return 0;
 }
 
@@ -201,11 +224,6 @@ static int i915_dma_resume(struct drm_device * dev)
 		return -EINVAL;
 	}
 
-	if (!dev_priv->mmio_map) {
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
-	}
-
 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -220,9 +238,9 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
 
 	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	else
-		I915_WRITE(0x02080, dev_priv->dma_status_page);
+		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG("Enabled hardware status page\n");
 
 	return 0;
@@ -367,9 +385,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 	return 0;
 }
 
-static int i915_emit_box(struct drm_device * dev,
-			 struct drm_clip_rect __user * boxes,
-			 int i, int DR1, int DR4)
+int
+i915_emit_box(struct drm_device *dev,
+	      struct drm_clip_rect __user *boxes,
+	      int i, int DR1, int DR4)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box;
@@ -415,14 +434,15 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
 
-	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
-
+	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+		dev_priv->counter = 0;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
 	BEGIN_LP_RING(4);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
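
MI_STORE_DWORD_INDEX with 5 << MI_STORE_DWORD_INDEX_SHIFT tells the GPU to store the counter into dword 5 of the hardware status page; the old code expressed the same slot as the raw byte offset 20. The CPU side reads that slot back through READ_BREADCRUMB. A hedged sketch of the consumer (the macro matches the driver's own definition; the busy-wait is illustrative only, the real driver sleeps on dev_priv->irq_queue):

/* Dword 5 of the 4 KB status page holds the last breadcrumb written. */
#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])

/* Illustrative poll: returns once the GPU has executed the
 * MI_STORE_DWORD_INDEX for sequence number 'nr'.  Assumes 'nr' and the
 * counter are in the same wrap epoch (the counter resets to 0 at
 * 0x7FFFFFFF, as i915_emit_breadcrumb() above shows). */
static void wait_for_breadcrumb(drm_i915_private_t *dev_priv, u32 nr)
{
	while (READ_BREADCRUMB(dev_priv) < nr)
		cpu_relax();
}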
@@ -486,7 +506,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				return ret;
 		}
 
-		if (dev_priv->use_mi_batchbuffer_start) {
+		if (!IS_I830(dev) && !IS_845G(dev)) {
 			BEGIN_LP_RING(2);
 			if (IS_I965G(dev)) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@@ -516,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
 
+	if (!dev_priv->sarea_priv)
+		return -EINVAL;
+
 	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
 		  __func__,
 		  dev_priv->current_page,
@@ -524,7 +547,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	i915_kernel_lost_context(dev);
 
 	BEGIN_LP_RING(2);
-	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
 
@@ -549,8 +572,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
 	BEGIN_LP_RING(4);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -570,9 +593,15 @@ static int i915_quiescent(struct drm_device * dev)
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	int ret;
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_quiescent(dev);
+	mutex_unlock(&dev->struct_mutex);
 
-	return i915_quiescent(dev);
+	return ret;
 }
 
 static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -593,16 +622,19 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
 		  batch->start, batch->used, batch->num_cliprects);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
 						       batch->num_cliprects *
 						       sizeof(struct drm_clip_rect)))
 		return -EFAULT;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_batchbuffer(dev, batch);
+	mutex_unlock(&dev->struct_mutex);
 
-	sarea_priv->last_dispatch = (int)hw_status[5];
+	if (sarea_priv)
+		sarea_priv->last_dispatch = (int)hw_status[5];
 	return ret;
 }
 
@@ -619,7 +651,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (cmdbuf->num_cliprects &&
 	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -629,24 +661,33 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		return -EFAULT;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
 		return ret;
 	}
 
-	sarea_priv->last_dispatch = (int)hw_status[5];
+	if (sarea_priv)
+		sarea_priv->last_dispatch = (int)hw_status[5];
 	return 0;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	int ret;
+
 	DRM_DEBUG("%s\n", __func__);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_dispatch_flip(dev);
+	mutex_unlock(&dev->struct_mutex);
 
-	return i915_dispatch_flip(dev);
+	return ret;
 }
 
 static int i915_getparam(struct drm_device *dev, void *data,
@@ -663,7 +704,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case I915_PARAM_IRQ_ACTIVE:
-		value = dev->irq ? 1 : 0;
+		value = dev->pdev->irq ? 1 : 0;
 		break;
 	case I915_PARAM_ALLOW_BATCHBUFFER:
 		value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -671,6 +712,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_LAST_DISPATCH:
 		value = READ_BREADCRUMB(dev_priv);
 		break;
+	case I915_PARAM_CHIPSET_ID:
+		value = dev->pci_device;
+		break;
+	case I915_PARAM_HAS_GEM:
+		value = 1;
+		break;
 	default:
 		DRM_ERROR("Unknown parameter %d\n", param->param);
 		return -EINVAL;
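
Userspace probes these capabilities through the GETPARAM ioctl; a sketch of the caller side, assuming the usual i915_drm.h definitions and an fd opened on a node such as /dev/dri/card0:

#include <sys/ioctl.h>
#include "i915_drm.h"	/* drm_i915_getparam_t, DRM_IOCTL_I915_GETPARAM */

/* Returns nonzero if the running i915 reports GEM support. */
static int i915_has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* older kernel: parameter unknown */
	return value;
}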
@@ -697,8 +744,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-		if (!IS_I965G(dev))
-			dev_priv->use_mi_batchbuffer_start = param->value;
 		break;
 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
 		dev_priv->tex_lru_log_granularity = param->value;
@@ -749,8 +794,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	dev_priv->hw_status_page = dev_priv->hws_map.handle;
 
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
-	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
+	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
 			dev_priv->status_gfx_addr);
 	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
 	return 0;
@@ -776,14 +821,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	memset(dev_priv, 0, sizeof(drm_i915_private_t));
 
 	dev->dev_private = (void *)dev_priv;
+	dev_priv->dev = dev;
 
 	/* Add register map (needed for suspend/resume) */
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
-	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
-			 _DRM_KERNEL | _DRM_DRIVER,
-			 &dev_priv->mmio_map);
+	dev_priv->regs = ioremap(base, size);
+
+	i915_gem_load(dev);
+
+	/* Init HWS */
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ret = i915_init_phys_hws(dev);
+		if (ret != 0)
+			return ret;
+	}
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs.  It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		if (pci_enable_msi(dev->pdev))
+			DRM_ERROR("failed to enable MSI\n");
+
+	intel_opregion_init(dev);
+
+	spin_lock_init(&dev_priv->user_irq_lock);
+
 	return ret;
 }
 
@@ -791,8 +860,15 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->mmio_map)
-		drm_rmmap(dev, dev_priv->mmio_map);
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	i915_free_hws(dev);
+
+	if (dev_priv->regs != NULL)
+		iounmap(dev_priv->regs);
+
+	intel_opregion_free(dev);
 
 	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
 		 DRM_MEM_DRIVER);
@@ -800,6 +876,25 @@ int i915_driver_unload(struct drm_device *dev)
 	return 0;
 }
 
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_i915_file_private *i915_file_priv;
+
+	DRM_DEBUG("\n");
+	i915_file_priv = (struct drm_i915_file_private *)
+	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+	if (!i915_file_priv)
+		return -ENOMEM;
+
+	file_priv->driver_priv = i915_file_priv;
+
+	i915_file_priv->mm.last_gem_seqno = 0;
+	i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+	return 0;
+}
+
 void i915_driver_lastclose(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -807,6 +902,8 @@ void i915_driver_lastclose(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
+	i915_gem_lastclose(dev);
+
 	if (dev_priv->agp_heap)
 		i915_mem_takedown(&(dev_priv->agp_heap));
 
@@ -819,6 +916,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
 struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -836,7 +940,23 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
 	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
 	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
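
Note that the object-management entries (CREATE, PREAD, PWRITE, MMAP, SET_DOMAIN, SW_FINISH, and the tiling ioctls) carry flag 0, so any process that can open the device node may use them; only execbuffer, pin/unpin, and the init/VT paths require authentication or root. A hedged sketch of the simplest consumer, creating a buffer object (struct layout per the i915_drm.h this series adds):

#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"	/* struct drm_i915_gem_create, DRM_IOCTL_I915_GEM_CREATE */

/* Create a size-byte GEM object; returns its handle, or 0 on failure.
 * Handles are local to this fd; flink gives an object a global name. */
static unsigned int gem_create(int fd, unsigned long long size)
{
	struct drm_i915_gem_create create;

	memset(&create, 0, sizeof(create));
	create.size = size;	/* the kernel rounds up to page size */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return 0;
	return create.handle;
}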
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 93aed1c38bd2..a80ead215282 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
 	i915_PCI_IDS
 };
 
-enum pipe {
-    PIPE_A = 0,
-    PIPE_B,
-};
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (pipe == PIPE_A)
-		return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
-	else
-		return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->save_palette_a;
-	else
-		array = dev_priv->save_palette_b;
-
-	for(i = 0; i < 256; i++)
-		array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->save_palette_a;
-	else
-		array = dev_priv->save_palette_b;
-
-	for(i = 0; i < 256; i++)
-		I915_WRITE(reg + (i << 2), array[i]);
-}
-
-static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
-{
-	outb(reg, index_port);
-	return inb(data_port);
-}
-
-static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
-{
-	inb(st01);
-	outb(palette_enable | reg, VGA_AR_INDEX);
-	return inb(VGA_AR_DATA_READ);
-}
-
-static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
-{
-	inb(st01);
-	outb(palette_enable | reg, VGA_AR_INDEX);
-	outb(val, VGA_AR_DATA_WRITE);
-}
-
-static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
-{
-	outb(reg, index_port);
-	outb(val, data_port);
-}
-
-static void i915_save_vga(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-	u16 cr_index, cr_data, st01;
-
-	/* VGA color palette registers */
-	dev_priv->saveDACMASK = inb(VGA_DACMASK);
-	/* DACRX automatically increments during read */
-	outb(0, VGA_DACRX);
-	/* Read 3 bytes of color data from each index */
-	for (i = 0; i < 256 * 3; i++)
-		dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
-
-	/* MSR bits */
-	dev_priv->saveMSR = inb(VGA_MSR_READ);
-	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-		cr_index = VGA_CR_INDEX_CGA;
-		cr_data = VGA_CR_DATA_CGA;
-		st01 = VGA_ST01_CGA;
-	} else {
-		cr_index = VGA_CR_INDEX_MDA;
-		cr_data = VGA_CR_DATA_MDA;
-		st01 = VGA_ST01_MDA;
-	}
-
-	/* CRT controller regs */
-	i915_write_indexed(cr_index, cr_data, 0x11,
-			   i915_read_indexed(cr_index, cr_data, 0x11) &
-			   (~0x80));
-	for (i = 0; i <= 0x24; i++)
-		dev_priv->saveCR[i] =
-			i915_read_indexed(cr_index, cr_data, i);
-	/* Make sure we don't turn off CR group 0 writes */
-	dev_priv->saveCR[0x11] &= ~0x80;
-
-	/* Attribute controller registers */
-	inb(st01);
-	dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
-	for (i = 0; i <= 0x14; i++)
-		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
-	inb(st01);
-	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
-	inb(st01);
-
-	/* Graphics controller registers */
-	for (i = 0; i < 9; i++)
-		dev_priv->saveGR[i] =
-			i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
-
-	dev_priv->saveGR[0x10] =
-		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-	dev_priv->saveGR[0x11] =
-		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-	dev_priv->saveGR[0x18] =
-		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
-	/* Sequencer registers */
-	for (i = 0; i < 8; i++)
-		dev_priv->saveSR[i] =
-			i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void i915_restore_vga(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-	u16 cr_index, cr_data, st01;
-
-	/* MSR bits */
-	outb(dev_priv->saveMSR, VGA_MSR_WRITE);
-	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-		cr_index = VGA_CR_INDEX_CGA;
-		cr_data = VGA_CR_DATA_CGA;
-		st01 = VGA_ST01_CGA;
-	} else {
-		cr_index = VGA_CR_INDEX_MDA;
-		cr_data = VGA_CR_DATA_MDA;
-		st01 = VGA_ST01_MDA;
-	}
-
-	/* Sequencer registers, don't write SR07 */
-	for (i = 0; i < 7; i++)
-		i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
-				   dev_priv->saveSR[i]);
-
-	/* CRT controller regs */
-	/* Enable CR group 0 writes */
-	i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
-	for (i = 0; i <= 0x24; i++)
-		i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
-
-	/* Graphics controller regs */
-	for (i = 0; i < 9; i++)
-		i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
-				   dev_priv->saveGR[i]);
-
-	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-			   dev_priv->saveGR[0x10]);
-	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-			   dev_priv->saveGR[0x11]);
-	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-			   dev_priv->saveGR[0x18]);
-
-	/* Attribute controller registers */
-	inb(st01);
-	for (i = 0; i <= 0x14; i++)
-		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
-	inb(st01); /* switch back to index mode */
-	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
-	inb(st01);
-
-	/* VGA color palette registers */
-	outb(dev_priv->saveDACMASK, VGA_DACMASK);
-	/* DACWX automatically increments during write */
-	outb(0, VGA_DACWX);
-	/* Write 3 bytes of color data to each index */
-	for (i = 0; i < 256 * 3; i++)
-		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
-
-}
-
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
 
 	if (!dev || !dev_priv) {
 		printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
@@ -254,122 +52,10 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 		return 0;
 
 	pci_save_state(dev->pdev);
-	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-
-	/* Display arbitration control */
-	dev_priv->saveDSPARB = I915_READ(DSPARB);
-
-	/* Pipe & plane A info */
-	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-	dev_priv->saveFPA0 = I915_READ(FPA0);
-	dev_priv->saveFPA1 = I915_READ(FPA1);
-	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
-	if (IS_I965G(dev))
-		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-
-	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-	dev_priv->saveDSPABASE = I915_READ(DSPABASE);
-	if (IS_I965G(dev)) {
-		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
-	}
-	i915_save_palette(dev, PIPE_A);
-	dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
-
-	/* Pipe & plane B info */
-	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-	dev_priv->saveFPB0 = I915_READ(FPB0);
-	dev_priv->saveFPB1 = I915_READ(FPB1);
-	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
-	if (IS_I965G(dev))
-		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
-
-	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-	dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
-	if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
-		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
-	}
-	i915_save_palette(dev, PIPE_B);
-	dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
 
-	/* CRT state */
-	dev_priv->saveADPA = I915_READ(ADPA);
+	i915_save_state(dev);
 
-	/* LVDS state */
-	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-	if (IS_I965G(dev))
-		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
-	if (IS_MOBILE(dev) && !IS_I830(dev))
-		dev_priv->saveLVDS = I915_READ(LVDS);
-	if (!IS_I830(dev) && !IS_845G(dev))
-		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-	dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
-	dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
-	dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
-
-	/* FIXME: save TV & SDVO state */
-
-	/* FBC state */
-	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
-	/* Interrupt state */
-	dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
-	dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
-	dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
-
-	/* VGA state */
-	dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
-	dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
-	dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
-	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
-
-	/* Clock gating state */
-	dev_priv->saveD_STATE = I915_READ(D_STATE);
-	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
-
-	/* Cache mode state */
-	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
-	/* Memory Arbitration state */
-	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
-	/* Scratch space */
-	for (i = 0; i < 16; i++) {
-		dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
-		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
-	}
-	for (i = 0; i < 3; i++)
-		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-
-	i915_save_vga(dev);
+	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
@@ -382,155 +68,15 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
 		return -1;
 	pci_set_master(dev->pdev);
 
-	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-
-	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
-	/* Pipe & plane A info */
-	/* Prime the clock */
-	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-		I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
-			   ~DPLL_VCO_ENABLE);
-		udelay(150);
-	}
-	I915_WRITE(FPA0, dev_priv->saveFPA0);
-	I915_WRITE(FPA1, dev_priv->saveFPA1);
-	/* Actually enable it */
-	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
-	udelay(150);
-	if (IS_I965G(dev))
-		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-	udelay(150);
-
-	/* Restore mode */
-	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
-	/* Restore plane info */
-	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-	I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
-	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-	if (IS_I965G(dev)) {
-		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
-	}
-
-	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
-
-	i915_restore_palette(dev, PIPE_A);
-	/* Enable the plane */
-	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-	I915_WRITE(DSPABASE, I915_READ(DSPABASE));
-
-	/* Pipe & plane B info */
-	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-		I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
-			   ~DPLL_VCO_ENABLE);
-		udelay(150);
-	}
-	I915_WRITE(FPB0, dev_priv->saveFPB0);
-	I915_WRITE(FPB1, dev_priv->saveFPB1);
-	/* Actually enable it */
-	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
-	udelay(150);
-	if (IS_I965G(dev))
-		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-	udelay(150);
-
-	/* Restore mode */
-	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-
-	/* Restore plane info */
-	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-	I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
-	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
-	if (IS_I965G(dev)) {
-		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
-	}
-
-	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
-
-	i915_restore_palette(dev, PIPE_B);
-	/* Enable the plane */
-	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-	I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
-
-	/* CRT state */
-	I915_WRITE(ADPA, dev_priv->saveADPA);
-
-	/* LVDS state */
-	if (IS_I965G(dev))
-		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
-	if (IS_MOBILE(dev) && !IS_I830(dev))
-		I915_WRITE(LVDS, dev_priv->saveLVDS);
-	if (!IS_I830(dev) && !IS_845G(dev))
-		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
-
-	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
-	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-	I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
-	I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
-	I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
-	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-
-	/* FIXME: restore TV & SDVO state */
-
-	/* FBC info */
-	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
-
-	/* VGA state */
-	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
-	I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
-	I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
-	I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
-	udelay(150);
-
-	/* Clock gating state */
-	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
-
-	/* Cache mode state */
-	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-
-	/* Memory arbitration state */
-	I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
-
-	for (i = 0; i < 16; i++) {
-		I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
-		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
-	}
-	for (i = 0; i < 3; i++)
-		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+	i915_restore_state(dev);
 
-	i915_restore_vga(dev);
+	intel_opregion_init(dev);
 
 	return 0;
 }
@@ -541,17 +87,19 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
-	    DRIVER_IRQ_VBL2,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
+	.open = i915_driver_open,
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
+	.postclose = i915_driver_postclose,
 	.suspend = i915_suspend,
 	.resume = i915_resume,
 	.device_is_agp = i915_driver_device_is_agp,
-	.vblank_wait = i915_driver_vblank_wait,
-	.vblank_wait2 = i915_driver_vblank_wait2,
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i915_enable_vblank,
+	.disable_vblank = i915_disable_vblank,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
@@ -559,6 +107,10 @@ static struct drm_driver driver = {
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
+	.proc_init = i915_gem_proc_init,
+	.proc_cleanup = i915_gem_proc_cleanup,
+	.gem_init_object = i915_gem_init_object,
+	.gem_free_object = i915_gem_free_object,
 	.ioctls = i915_ioctls,
 	.fops = {
 		 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d7326d92a237..eae4ed3956e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
 
+#include "i915_reg.h"
+
 /* General customization:
  */
 
@@ -37,7 +39,12 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20060119"
+#define DRIVER_DATE		"20080730"
+
+enum pipe {
+	PIPE_A = 0,
+	PIPE_B,
+};
 
 /* Interface history:
  *
@@ -53,16 +60,23 @@
 #define DRIVER_MINOR		6
 #define DRIVER_PATCHLEVEL	0
 
+#define WATCH_COHERENCY	0
+#define WATCH_BUF	0
+#define WATCH_EXEC	0
+#define WATCH_LRU	0
+#define WATCH_RELOC	0
+#define WATCH_INACTIVE	0
+#define WATCH_PWRITE	0
+
 typedef struct _drm_i915_ring_buffer {
 	int tail_mask;
-	unsigned long Start;
-	unsigned long End;
 	unsigned long Size;
 	u8 *virtual_start;
 	int head;
 	int tail;
 	int space;
 	drm_local_map_t map;
+	struct drm_gem_object *ring_obj;
 } drm_i915_ring_buffer_t;
 
 struct mem_block {
@@ -76,13 +90,28 @@ struct mem_block {
 typedef struct _drm_i915_vbl_swap {
 	struct list_head head;
 	drm_drawable_t drw_id;
-	unsigned int pipe;
+	unsigned int plane;
 	unsigned int sequence;
 } drm_i915_vbl_swap_t;
 
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+	struct opregion_header *header;
+	struct opregion_acpi *acpi;
+	struct opregion_swsci *swsci;
+	struct opregion_asle *asle;
+	int enabled;
+};
+
 typedef struct drm_i915_private {
+	struct drm_device *dev;
+
+	void __iomem *regs;
 	drm_local_map_t *sarea;
-	drm_local_map_t *mmio_map;
 
 	drm_i915_sarea_t *sarea_priv;
 	drm_i915_ring_buffer_t ring;
@@ -90,20 +119,25 @@ typedef struct drm_i915_private {
 	drm_dma_handle_t *status_page_dmah;
 	void *hw_status_page;
 	dma_addr_t dma_status_page;
-	unsigned long counter;
+	uint32_t counter;
 	unsigned int status_gfx_addr;
 	drm_local_map_t hws_map;
+	struct drm_gem_object *hws_obj;
 
 	unsigned int cpp;
 	int back_offset;
 	int front_offset;
 	int current_page;
 	int page_flipping;
-	int use_mi_batchbuffer_start;
 
 	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
-	atomic_t irq_emitted;
+	/** Protects user_irq_refcount and irq_mask_reg */
+	spinlock_t user_irq_lock;
+	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
+	int user_irq_refcount;
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	u32 irq_mask_reg;
 
 	int tex_lru_log_granularity;
 	int allow_batchbuffer;
@@ -115,6 +149,8 @@ typedef struct drm_i915_private {
 	drm_i915_vbl_swap_t vbl_swaps;
 	unsigned int swaps_pending;
 
+	struct intel_opregion opregion;
+
 	/* Register state */
 	u8 saveLBB;
 	u32 saveDSPACNTR;
@@ -139,7 +175,7 @@ typedef struct drm_i915_private {
 	u32 saveDSPASTRIDE;
 	u32 saveDSPASIZE;
 	u32 saveDSPAPOS;
-	u32 saveDSPABASE;
+	u32 saveDSPAADDR;
 	u32 saveDSPASURF;
 	u32 saveDSPATILEOFF;
 	u32 savePFIT_PGM_RATIOS;
@@ -160,24 +196,24 @@ typedef struct drm_i915_private {
 	u32 saveDSPBSTRIDE;
 	u32 saveDSPBSIZE;
 	u32 saveDSPBPOS;
-	u32 saveDSPBBASE;
+	u32 saveDSPBADDR;
 	u32 saveDSPBSURF;
 	u32 saveDSPBTILEOFF;
-	u32 saveVCLK_DIVISOR_VGA0;
-	u32 saveVCLK_DIVISOR_VGA1;
-	u32 saveVCLK_POST_DIV;
+	u32 saveVGA0;
+	u32 saveVGA1;
+	u32 saveVGA_PD;
 	u32 saveVGACNTRL;
 	u32 saveADPA;
 	u32 saveLVDS;
-	u32 saveLVDSPP_ON;
-	u32 saveLVDSPP_OFF;
+	u32 savePP_ON_DELAYS;
+	u32 savePP_OFF_DELAYS;
 	u32 saveDVOA;
 	u32 saveDVOB;
 	u32 saveDVOC;
 	u32 savePP_ON;
 	u32 savePP_OFF;
 	u32 savePP_CONTROL;
-	u32 savePP_CYCLE;
+	u32 savePP_DIVISOR;
 	u32 savePFIT_CONTROL;
 	u32 save_palette_a[256];
 	u32 save_palette_b[256];
@@ -190,7 +226,7 @@ typedef struct drm_i915_private {
 	u32 saveIMR;
 	u32 saveCACHE_MODE_0;
 	u32 saveD_STATE;
-	u32 saveDSPCLK_GATE_D;
+	u32 saveCG_2D_DIS;
 	u32 saveMI_ARB_STATE;
 	u32 saveSWF0[16];
 	u32 saveSWF1[16];
@@ -203,8 +239,180 @@ typedef struct drm_i915_private {
 	u8 saveDACMASK;
 	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
 	u8 saveCR[37];
+
+	struct {
+		struct drm_mm gtt_space;
+
+		/**
+		 * List of objects currently involved in rendering from the
+		 * ringbuffer.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head active_list;
+
+		/**
+		 * List of objects which are not in the ringbuffer but which
+		 * still have a write_domain which needs to be flushed before
+		 * unbinding.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head flushing_list;
+
+		/**
+		 * LRU list of objects which are not in the ringbuffer and
+		 * are ready to unbind, but are still in the GTT.
+		 *
+		 * A reference is not held on the buffer while on this list,
+		 * as merely being GTT-bound shouldn't prevent its being
+		 * freed, and we'll pull it off the list in the free path.
+		 */
+		struct list_head inactive_list;
+
+		/**
+		 * List of breadcrumbs associated with GPU requests currently
+		 * outstanding.
+		 */
+		struct list_head request_list;
+
+		/**
+		 * We leave the user IRQ off as much as possible,
+		 * but this means that requests will finish and never
+		 * be retired once the system goes idle. Set a timer to
+		 * fire periodically while the ring is running. When it
+		 * fires, go retire requests.
+		 */
+		struct delayed_work retire_work;
+
+		/** Work task for vblank-related ring access */
+		struct work_struct vblank_work;
+
+		uint32_t next_gem_seqno;
+
+		/**
+		 * Waiting sequence number, if any
+		 */
+		uint32_t waiting_gem_seqno;
+
+		/**
+		 * Last seq seen at irq time
+		 */
+		uint32_t irq_gem_seqno;
+
+		/**
+		 * Flag if the X Server, and thus DRM, is not currently in
+		 * control of the device.
+		 *
+		 * This is set between LeaveVT and EnterVT.  It needs to be
+		 * replaced with a semaphore.  It also needs to be
+		 * transitioned away from for kernel modesetting.
+		 */
+		int suspended;
+
+		/**
+		 * Flag if the hardware appears to be wedged.
+		 *
+		 * This is set when attempts to idle the device time out.
+		 * It prevents command submission from occurring and makes
+		 * every pending request fail.
+		 */
+		int wedged;
+
+		/** Bit 6 swizzling required for X tiling */
+		uint32_t bit_6_swizzle_x;
+		/** Bit 6 swizzling required for Y tiling */
+		uint32_t bit_6_swizzle_y;
+	} mm;
 } drm_i915_private_t;
 
+/** driver private structure attached to each drm_gem_object */
+struct drm_i915_gem_object {
+	struct drm_gem_object *obj;
+
+	/** Current space allocated to this object in the GTT, if any. */
+	struct drm_mm_node *gtt_space;
+
+	/** This object's place on the active/flushing/inactive lists */
+	struct list_head list;
+
+	/**
+	 * This is set if the object is on the active or flushing lists
+	 * (has pending rendering), and is not set if it's on inactive (ready
+	 * to be unbound).
+	 */
+	int active;
+
+	/**
+	 * This is set if the object has been written to since last bound
+	 * to the GTT
+	 */
+	int dirty;
+
+	/** AGP memory structure for our GTT binding. */
+	DRM_AGP_MEM *agp_mem;
+
+	struct page **page_list;
+
+	/**
+	 * Current offset of the object in GTT space.
+	 *
+	 * This is the same as gtt_space->start
+	 */
+	uint32_t gtt_offset;
+
+	/** Boolean whether this object has a valid gtt offset. */
+	int gtt_bound;
+
+	/** How many users have pinned this object in GTT space */
+	int pin_count;
+
+	/** Breadcrumb of last rendering to the buffer. */
+	uint32_t last_rendering_seqno;
+
+	/** Current tiling mode for the object. */
+	uint32_t tiling_mode;
+
+	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY) */
+	uint32_t agp_type;
+
+	/**
+	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
+	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 */
+	uint8_t *page_cpu_valid;
+};
+
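
The active/flushing/inactive lists in the mm struct, together with the per-object list field, active flag, and last_rendering_seqno above, encode a lifecycle: active (the GPU may still touch the buffer), flushing (off the ring but with write domains still to flush), inactive (idle LRU eviction candidates, the only list holding no reference). A kernel-style sketch of the transition an object makes when its rendering retires, modeled on the bookkeeping those comments describe (the helper name is illustrative):

#include <linux/list.h>

/* Illustrative only: park a now-idle object on the inactive LRU. */
static void gem_object_move_to_inactive(drm_i915_private_t *dev_priv,
					struct drm_i915_gem_object *obj_priv)
{
	/* Tail insertion keeps the list in LRU order, so eviction can
	 * always start at the head. */
	list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
	obj_priv->active = 0;
	obj_priv->last_rendering_seqno = 0;
}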
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable
+ * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * an emission time with seqnos for tracking how far ahead of the GPU we are.
+ */
+struct drm_i915_gem_request {
+	/** GEM sequence number associated with this request. */
+	uint32_t seqno;
+
+	/** Time at which this request was emitted, in jiffies. */
+	unsigned long emitted_jiffies;
+
+	/** Cache domains that were flushed at the start of the request. */
+	uint32_t flush_domains;
+
+	struct list_head list;
+};
+
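
Retirement then reduces to walking request_list in emission order and stopping at the first sequence number the GPU has not yet passed. A hedged sketch of that loop (i915_get_gem_seqno() is declared below; the wrap-safe comparison is the standard unsigned-difference idiom; the real retire path also checks the wedged flag and releases each request's buffers):

#include <linux/list.h>

/* True if seq1 is at or after seq2, tolerating 32-bit wraparound. */
static int gem_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* Illustrative retire loop over the FIFO of emitted requests. */
static void gem_retire_requests(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t seqno = i915_get_gem_seqno(dev);

	while (!list_empty(&dev_priv->mm.request_list)) {
		struct drm_i915_gem_request *request =
			list_first_entry(&dev_priv->mm.request_list,
					 struct drm_i915_gem_request, list);

		if (!gem_seqno_passed(seqno, request->seqno))
			break;	/* everything after this is still pending */
		list_del(&request->list);
		/* ...move the request's buffers off the active list and
		 * free 'request' here... */
	}
}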
+struct drm_i915_file_private {
+	struct {
+		uint32_t last_gem_seqno;
+		uint32_t last_gem_throttle_seqno;
+	} mm;
+};
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 
@@ -212,31 +420,42 @@ extern int i915_max_ioctl;
 extern void i915_kernel_lost_context(struct drm_device * dev);
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
 extern void i915_driver_lastclose(struct drm_device * dev);
 extern void i915_driver_preclose(struct drm_device *dev,
 				 struct drm_file *file_priv);
+extern void i915_driver_postclose(struct drm_device *dev,
+				  struct drm_file *file_priv);
 extern int i915_driver_device_is_agp(struct drm_device * dev);
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
+extern int i915_emit_box(struct drm_device *dev,
+			 struct drm_clip_rect __user *boxes,
+			 int i, int DR1, int DR4);
 
 /* i915_irq.c */
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
+void i915_user_irq_get(struct drm_device *dev);
+void i915_user_irq_put(struct drm_device *dev);
 
-extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern void i915_gem_vblank_work_handler(struct work_struct *work);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern int i915_driver_irq_postinstall(struct drm_device *dev);
 extern void i915_driver_irq_uninstall(struct drm_device * dev);
 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+extern int i915_enable_vblank(struct drm_device *dev, int crtc);
+extern void i915_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
+extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -250,11 +469,99 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
 extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
 			     struct drm_file *file_priv, struct mem_block *heap);
+/* i915_gem.c */
+int i915_gem_init_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_create_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+int i915_gem_execbuffer(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+int i915_gem_set_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int i915_gem_get_tiling(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+void i915_gem_load(struct drm_device *dev);
+int i915_gem_proc_init(struct drm_minor *minor);
+void i915_gem_proc_cleanup(struct drm_minor *minor);
+int i915_gem_init_object(struct drm_gem_object *obj);
+void i915_gem_free_object(struct drm_gem_object *obj);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+void i915_gem_object_unpin(struct drm_gem_object *obj);
+void i915_gem_lastclose(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_clflush_object(struct drm_gem_object *obj);
+
+/* i915_gem_tiling.c */
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+
+/* i915_gem_debug.c */
+void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+			  const char *where, uint32_t mark);
+#if WATCH_INACTIVE
+void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#else
+#define i915_verify_inactive(dev, file, line)
+#endif
+void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
+void i915_dump_lru(struct drm_device *dev, const char *where);
+
+/* i915_suspend.c */
+extern int i915_save_state(struct drm_device *dev);
+extern int i915_restore_state(struct drm_device *dev);
+
+/* i915_opregion.c */
+extern int intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_free(struct drm_device *dev);
+extern void opregion_asle_intr(struct drm_device *dev);
+extern void opregion_enable_asle(struct drm_device *dev);
+
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
+	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
+} while (0)
 
-#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
-#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
-#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+#define I915_READ(reg)          readl(dev_priv->regs + (reg))
+#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
+#define I915_READ16(reg)	readw(dev_priv->regs + (reg))
+#define I915_WRITE16(reg, val)	writew(val, dev_priv->regs + (reg))
+#define I915_READ8(reg)		readb(dev_priv->regs + (reg))
+#define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
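
With dev_priv->regs holding the ioremap()ed MMIO base, these are ordinary readl()/writel() accessors. A typical masked update under these macros looks like the following sketch (the register and bits are placeholders, not a specific programming sequence):

/* Illustrative read-modify-write: set 'bits' in 'reg' while preserving
 * the other fields, then read back to post the write. */
static void i915_set_bits(drm_i915_private_t *dev_priv, u32 reg, u32 bits)
{
	u32 val = I915_READ(reg);

	I915_WRITE(reg, val | bits);
	(void)I915_READ(reg);	/* posting read */
}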
 
 #define I915_VERBOSE 0
 
@@ -284,816 +591,29 @@ extern void i915_mem_release(struct drm_device * dev,
 	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
 	dev_priv->ring.tail = outring;					\
 	dev_priv->ring.space -= outcount * 4;				\
-	I915_WRITE(LP_RING + RING_TAIL, outring);			\
+	I915_WRITE(PRB0_TAIL, outring);			\
 } while(0)
 
-extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-
-/* Extended config space */
-#define LBB 0xf4
-
-/* VGA stuff */
-
-#define VGA_ST01_MDA 0x3ba
-#define VGA_ST01_CGA 0x3da
-
-#define VGA_MSR_WRITE 0x3c2
-#define VGA_MSR_READ 0x3cc
-#define   VGA_MSR_MEM_EN (1<<1)
-#define   VGA_MSR_CGA_MODE (1<<0)
-
-#define VGA_SR_INDEX 0x3c4
-#define VGA_SR_DATA 0x3c5
-
-#define VGA_AR_INDEX 0x3c0
-#define   VGA_AR_VID_EN (1<<5)
-#define VGA_AR_DATA_WRITE 0x3c0
-#define VGA_AR_DATA_READ 0x3c1
-
-#define VGA_GR_INDEX 0x3ce
-#define VGA_GR_DATA 0x3cf
-/* GR05 */
-#define   VGA_GR_MEM_READ_MODE_SHIFT 3
-#define     VGA_GR_MEM_READ_MODE_PLANE 1
-/* GR06 */
-#define   VGA_GR_MEM_MODE_MASK 0xc
-#define   VGA_GR_MEM_MODE_SHIFT 2
-#define   VGA_GR_MEM_A0000_AFFFF 0
-#define   VGA_GR_MEM_A0000_BFFFF 1
-#define   VGA_GR_MEM_B0000_B7FFF 2
-#define   VGA_GR_MEM_B0000_BFFFF 3
-
-#define VGA_DACMASK 0x3c6
-#define VGA_DACRX 0x3c7
-#define VGA_DACWX 0x3c8
-#define VGA_DACDATA 0x3c9
-
-#define VGA_CR_INDEX_MDA 0x3b4
-#define VGA_CR_DATA_MDA 0x3b5
-#define VGA_CR_INDEX_CGA 0x3d4
-#define VGA_CR_DATA_CGA 0x3d5
-
-#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD			(7<<23)
-#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
-
-#define INST_PARSER_CLIENT   0x00000000
-#define INST_OP_FLUSH        0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK   (~0x7)
-#define BB1_PROTECTED         (1<<0)
-#define BB1_UNPROTECTED       (0<<0)
-#define BB2_END_ADDR_MASK     (~0x7)
-
-/* Framebuffer compression */
-#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
-#define FBC_LL_BASE		0x03204 /* 4k page aligned */
-#define FBC_CONTROL		0x03208
-#define   FBC_CTL_EN		(1<<31)
-#define   FBC_CTL_PERIODIC	(1<<30)
-#define   FBC_CTL_INTERVAL_SHIFT (16)
-#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
-#define   FBC_CTL_STRIDE_SHIFT	(5)
-#define   FBC_CTL_FENCENO	(1<<0)
-#define FBC_COMMAND		0x0320c
-#define   FBC_CMD_COMPRESS	(1<<0)
-#define FBC_STATUS		0x03210
-#define   FBC_STAT_COMPRESSING	(1<<31)
-#define   FBC_STAT_COMPRESSED	(1<<30)
-#define   FBC_STAT_MODIFIED	(1<<29)
-#define   FBC_STAT_CURRENT_LINE	(1<<0)
-#define FBC_CONTROL2		0x03214
-#define   FBC_CTL_FENCE_DBL	(0<<4)
-#define   FBC_CTL_IDLE_IMM	(0<<2)
-#define   FBC_CTL_IDLE_FULL	(1<<2)
-#define   FBC_CTL_IDLE_LINE	(2<<2)
-#define   FBC_CTL_IDLE_DEBUG	(3<<2)
-#define   FBC_CTL_CPU_FENCE	(1<<1)
-#define   FBC_CTL_PLANEA	(0<<0)
-#define   FBC_CTL_PLANEB	(1<<0)
-#define FBC_FENCE_OFF		0x0321b
-
-#define FBC_LL_SIZE		(1536)
-#define FBC_LL_PAD		(32)
-
-/* Interrupt bits:
- */
-#define USER_INT_FLAG    (1<<1)
-#define VSYNC_PIPEB_FLAG (1<<5)
-#define VSYNC_PIPEA_FLAG (1<<7)
-#define HWB_OOM_FLAG     (1<<13) /* binner out of memory */
-
-#define I915REG_HWSTAM		0x02098
-#define I915REG_INT_IDENTITY_R	0x020a4
-#define I915REG_INT_MASK_R	0x020a8
-#define I915REG_INT_ENABLE_R	0x020a0
-
-#define I915REG_PIPEASTAT	0x70024
-#define I915REG_PIPEBSTAT	0x71024
-
-#define I915_VBLANK_INTERRUPT_ENABLE	(1UL<<17)
-#define I915_VBLANK_CLEAR		(1UL<<1)
-
-#define SRX_INDEX		0x3c4
-#define SRX_DATA		0x3c5
-#define SR01			1
-#define SR01_SCREEN_OFF		(1<<5)
-
-#define PPCR			0x61204
-#define PPCR_ON			(1<<0)
-
-#define DVOB			0x61140
-#define DVOB_ON			(1<<31)
-#define DVOC			0x61160
-#define DVOC_ON			(1<<31)
-#define LVDS			0x61180
-#define LVDS_ON			(1<<31)
-
-#define ADPA			0x61100
-#define ADPA_DPMS_MASK		(~(3<<10))
-#define ADPA_DPMS_ON		(0<<10)
-#define ADPA_DPMS_SUSPEND	(1<<10)
-#define ADPA_DPMS_STANDBY	(2<<10)
-#define ADPA_DPMS_OFF		(3<<10)
-
-#define NOPID                   0x2094
-#define LP_RING			0x2030
-#define HP_RING			0x2040
-/* The binner has its own ring buffer:
- */
-#define HWB_RING		0x2400
-
-#define RING_TAIL		0x00
-#define TAIL_ADDR		0x001FFFF8
-#define RING_HEAD		0x04
-#define HEAD_WRAP_COUNT		0xFFE00000
-#define HEAD_WRAP_ONE		0x00200000
-#define HEAD_ADDR		0x001FFFFC
-#define RING_START		0x08
-#define START_ADDR		0xFFFFF000
-#define RING_LEN		0x0C
-#define RING_NR_PAGES		0x001FF000
-#define RING_REPORT_MASK	0x00000006
-#define RING_REPORT_64K		0x00000002
-#define RING_REPORT_128K	0x00000004
-#define RING_NO_REPORT		0x00000000
-#define RING_VALID_MASK		0x00000001
-#define RING_VALID		0x00000001
-#define RING_INVALID		0x00000000
-
-/* Instruction parser error reg:
- */
-#define IPEIR			0x2088
-
-/* Scratch pad debug 0 reg:
- */
-#define SCPD0			0x209c
-
-/* Error status reg:
- */
-#define ESR			0x20b8
-
-/* Secondary DMA fetch address debug reg:
- */
-#define DMA_FADD_S		0x20d4
-
-/* Memory Interface Arbitration State
- */
-#define MI_ARB_STATE		0x20e4
-
-/* Cache mode 0 reg.
- *  - Manipulating render cache behaviour is central
- *    to the concept of zone rendering, tuning this reg can help avoid
- *    unnecessary render cache reads and even writes (for z/stencil)
- *    at beginning and end of scene.
- *
- * - To change a bit, write to this reg with a mask bit set and the
- * bit of interest either set or cleared.  EG: (BIT<<16) | BIT to set.
- */
-#define Cache_Mode_0		0x2120
-#define CACHE_MODE_0		0x2120
-#define CM0_MASK_SHIFT          16
-#define CM0_IZ_OPT_DISABLE      (1<<6)
-#define CM0_ZR_OPT_DISABLE      (1<<5)
-#define CM0_DEPTH_EVICT_DISABLE (1<<4)
-#define CM0_COLOR_EVICT_DISABLE (1<<3)
-#define CM0_DEPTH_WRITE_DISABLE (1<<1)
-#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
-
-
-/* Graphics flush control.  A CPU write flushes the GWB of all writes.
- * The data is discarded.
- */
-#define GFX_FLSH_CNTL		0x2170
-
-/* Binner control.  Defines the location of the bin pointer list:
- */
-#define BINCTL			0x2420
-#define BC_MASK			(1 << 9)
-
-/* Binned scene info.
- */
-#define BINSCENE		0x2428
-#define BS_OP_LOAD		(1 << 8)
-#define BS_MASK			(1 << 22)
-
-/* Bin command parser debug reg:
- */
-#define BCPD			0x2480
-
-/* Bin memory control debug reg:
- */
-#define BMCD			0x2484
-
-/* Bin data cache debug reg:
- */
-#define BDCD			0x2488
-
-/* Binner pointer cache debug reg:
- */
-#define BPCD			0x248c
-
-/* Binner scratch pad debug reg:
- */
-#define BINSKPD			0x24f0
-
-/* HWB scratch pad debug reg:
- */
-#define HWBSKPD			0x24f4
-
-/* Binner memory pool reg:
- */
-#define BMP_BUFFER		0x2430
-#define BMP_PAGE_SIZE_4K	(0 << 10)
-#define BMP_BUFFER_SIZE_SHIFT	1
-#define BMP_ENABLE		(1 << 0)
-
-/* Get/put memory from the binner memory pool:
- */
-#define BMP_GET			0x2438
-#define BMP_PUT			0x2440
-#define BMP_OFFSET_SHIFT	5
-
-/* 3D state packets:
- */
-#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
-
-#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR       (0x1<<1)
-#define SC_ENABLE_MASK          (0x1<<0)
-#define SC_ENABLE               (0x1<<0)
-
-#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
-
-#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK      (0xffff<<16)
-#define SCI_XMIN_MASK      (0xffff<<0)
-#define SCI_YMAX_MASK      (0xffff<<16)
-#define SCI_XMAX_MASK      (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-
-#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
-
-#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
-#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
-#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15)
-#define XY_SRC_COPY_BLT_DST_TILED	(1<<11)
-
-#define MI_BATCH_BUFFER		((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START	(0x31<<23)
-#define MI_BATCH_BUFFER_END	(0xA<<23)
-#define MI_BATCH_NON_SECURE	(1)
-#define MI_BATCH_NON_SECURE_I965 (1<<8)
-
-#define MI_WAIT_FOR_EVENT       ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
-#define MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL  ((0x12<<23))
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP                (1<<22)
-#define DISPLAY_PLANE_A           (0<<20)
-#define DISPLAY_PLANE_B           (1<<20)
-
-/* Display regs */
-#define DSPACNTR                0x70180
-#define DSPBCNTR                0x71180
-#define DISPPLANE_SEL_PIPE_MASK                 (1<<24)
-
-/* Define the region of interest for the binner:
- */
-#define CMD_OP_BIN_CONTROL	 ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
-
-#define CMD_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-#define CMD_MI_FLUSH         (0x04 << 23)
-#define MI_NO_WRITE_FLUSH    (1 << 2)
-#define MI_READ_FLUSH        (1 << 0)
-#define MI_EXE_FLUSH         (1 << 1)
-#define MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
-#define MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
-
-#define BREADCRUMB_BITS 31
-#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
-
-#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
-#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
-
-#define BLC_PWM_CTL		0x61254
-#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
-
-#define BLC_PWM_CTL2		0x61250
-/**
- * This is the most significant 15 bits of the number of backlight cycles in a
- * complete cycle of the modulated backlight control.
- *
- * The actual value is this field multiplied by two.
- */
-#define BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
-#define BLM_LEGACY_MODE				(1 << 16)
-/**
- * This is the number of cycles out of the backlight modulation cycle for which
- * the backlight is on.
- *
- * This field must be no greater than the number of cycles in the complete
- * backlight modulation cycle.
- */
-#define BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
-#define BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
-
-#define I915_GCFGC			0xf0
-#define I915_LOW_FREQUENCY_ENABLE		(1 << 7)
-#define I915_DISPLAY_CLOCK_190_200_MHZ		(0 << 4)
-#define I915_DISPLAY_CLOCK_333_MHZ		(4 << 4)
-#define I915_DISPLAY_CLOCK_MASK			(7 << 4)
-
-#define I855_HPLLCC			0xc0
-#define I855_CLOCK_CONTROL_MASK			(3 << 0)
-#define I855_CLOCK_133_200			(0 << 0)
-#define I855_CLOCK_100_200			(1 << 0)
-#define I855_CLOCK_100_133			(2 << 0)
-#define I855_CLOCK_166_250			(3 << 0)
-
-/* p317, 319
- */
-#define VCLK2_VCO_M        0x6008 /* treat as 16 bit? (includes msbs) */
-#define VCLK2_VCO_N        0x600a
-#define VCLK2_VCO_DIV_SEL  0x6012
-
-#define VCLK_DIVISOR_VGA0   0x6000
-#define VCLK_DIVISOR_VGA1   0x6004
-#define VCLK_POST_DIV	    0x6010
-/** Selects a post divisor of 4 instead of 2. */
-# define VGA1_PD_P2_DIV_4	(1 << 15)
-/** Overrides the p2 post divisor field */
-# define VGA1_PD_P1_DIV_2	(1 << 13)
-# define VGA1_PD_P1_SHIFT	8
-/** P1 value is 2 greater than this field */
-# define VGA1_PD_P1_MASK	(0x1f << 8)
-/** Selects a post divisor of 4 instead of 2. */
-# define VGA0_PD_P2_DIV_4	(1 << 7)
-/** Overrides the p2 post divisor field */
-# define VGA0_PD_P1_DIV_2	(1 << 5)
-# define VGA0_PD_P1_SHIFT	0
-/** P1 value is 2 greater than this field */
-# define VGA0_PD_P1_MASK	(0x1f << 0)
-
-/* PCI D state control register */
-#define D_STATE		0x6104
-#define DSPCLK_GATE_D	0x6200
-
-/* I830 CRTC registers */
-#define HTOTAL_A	0x60000
-#define HBLANK_A	0x60004
-#define HSYNC_A		0x60008
-#define VTOTAL_A	0x6000c
-#define VBLANK_A	0x60010
-#define VSYNC_A		0x60014
-#define PIPEASRC	0x6001c
-#define BCLRPAT_A	0x60020
-#define VSYNCSHIFT_A	0x60028
-
-#define HTOTAL_B	0x61000
-#define HBLANK_B	0x61004
-#define HSYNC_B		0x61008
-#define VTOTAL_B	0x6100c
-#define VBLANK_B	0x61010
-#define VSYNC_B		0x61014
-#define PIPEBSRC	0x6101c
-#define BCLRPAT_B	0x61020
-#define VSYNCSHIFT_B	0x61028
-
-#define PP_STATUS	0x61200
-# define PP_ON					(1 << 31)
-/**
- * Indicates that all dependencies of the panel are on:
- *
- * - PLL enabled
- * - pipe enabled
- * - LVDS/DVOB/DVOC on
- */
-# define PP_READY				(1 << 30)
-# define PP_SEQUENCE_NONE			(0 << 28)
-# define PP_SEQUENCE_ON				(1 << 28)
-# define PP_SEQUENCE_OFF			(2 << 28)
-# define PP_SEQUENCE_MASK			0x30000000
-#define PP_CONTROL	0x61204
-# define POWER_TARGET_ON			(1 << 0)
-
-#define LVDSPP_ON       0x61208
-#define LVDSPP_OFF      0x6120c
-#define PP_CYCLE        0x61210
-
-#define PFIT_CONTROL	0x61230
-# define PFIT_ENABLE				(1 << 31)
-# define PFIT_PIPE_MASK				(3 << 29)
-# define PFIT_PIPE_SHIFT			29
-# define VERT_INTERP_DISABLE			(0 << 10)
-# define VERT_INTERP_BILINEAR			(1 << 10)
-# define VERT_INTERP_MASK			(3 << 10)
-# define VERT_AUTO_SCALE			(1 << 9)
-# define HORIZ_INTERP_DISABLE			(0 << 6)
-# define HORIZ_INTERP_BILINEAR			(1 << 6)
-# define HORIZ_INTERP_MASK			(3 << 6)
-# define HORIZ_AUTO_SCALE			(1 << 5)
-# define PANEL_8TO6_DITHER_ENABLE		(1 << 3)
-
-#define PFIT_PGM_RATIOS	0x61234
-# define PFIT_VERT_SCALE_MASK			0xfff00000
-# define PFIT_HORIZ_SCALE_MASK			0x0000fff0
-
-#define PFIT_AUTO_RATIOS	0x61238
-
-
-#define DPLL_A		0x06014
-#define DPLL_B		0x06018
-# define DPLL_VCO_ENABLE			(1 << 31)
-# define DPLL_DVO_HIGH_SPEED			(1 << 30)
-# define DPLL_SYNCLOCK_ENABLE			(1 << 29)
-# define DPLL_VGA_MODE_DIS			(1 << 28)
-# define DPLLB_MODE_DAC_SERIAL			(1 << 26) /* i915 */
-# define DPLLB_MODE_LVDS			(2 << 26) /* i915 */
-# define DPLL_MODE_MASK				(3 << 26)
-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24) /* i915 */
-# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5		(1 << 24) /* i915 */
-# define DPLLB_LVDS_P2_CLOCK_DIV_14		(0 << 24) /* i915 */
-# define DPLLB_LVDS_P2_CLOCK_DIV_7		(1 << 24) /* i915 */
-# define DPLL_P2_CLOCK_DIV_MASK			0x03000000 /* i915 */
-# define DPLL_FPA01_P1_POST_DIV_MASK		0x00ff0000 /* i915 */
-/**
- *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
- * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
- */
-# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
-/**
- * The i830 generation, in LVDS mode, defines P1 as the bit number set within
- * this field (only one bit may be set).
- */
-# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
-# define DPLL_FPA01_P1_POST_DIV_SHIFT		16
-# define PLL_P2_DIVIDE_BY_4			(1 << 23) /* i830, required in DVO non-gang */
-# define PLL_P1_DIVIDE_BY_TWO			(1 << 21) /* i830 */
-# define PLL_REF_INPUT_DREFCLK			(0 << 13)
-# define PLL_REF_INPUT_TVCLKINA			(1 << 13) /* i830 */
-# define PLL_REF_INPUT_TVCLKINBC		(2 << 13) /* SDVO TVCLKIN */
-# define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
-# define PLL_REF_INPUT_MASK			(3 << 13)
-# define PLL_LOAD_PULSE_PHASE_SHIFT		9
-/*
- * Parallel to Serial Load Pulse phase selection.
- * Selects the phase for the 10X DPLL clock for the PCIe
- * digital display port. The range is 4 to 13; 10 or more
- * is just a flip delay. The default is 6
- */
-# define PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
-# define DISPLAY_RATE_SELECT_FPA1		(1 << 8)
-
-/**
- * SDVO multiplier for 945G/GM. Not used on 965.
- *
- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
- */
-# define SDVO_MULTIPLIER_MASK			0x000000ff
-# define SDVO_MULTIPLIER_SHIFT_HIRES		4
-# define SDVO_MULTIPLIER_SHIFT_VGA		0
-
-/** @defgroup DPLL_MD
- * @{
- */
-/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
-#define DPLL_A_MD		0x0601c
-/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
-#define DPLL_B_MD		0x06020
-/**
- * UDI pixel divider, controlling how many pixels are stuffed into a packet.
- *
- * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
- */
-# define DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
-# define DPLL_MD_UDI_DIVIDER_SHIFT		24
-/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
-# define DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
-# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
-/**
- * SDVO/UDI pixel multiplier.
- *
- * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus
- * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
- * modes, the bus rate would be below the limits, so SDVO allows for stuffing
- * dummy bytes in the datastream at an increased clock rate, with both sides of
- * the link knowing how many bytes are fill.
- *
- * So, for a mode with a dotclock of 65Mhz, we would want to double the clock
- * rate to 130Mhz to get a bus rate of 1.30Ghz.  The DPLL clock rate would be
- * set to 130Mhz, and the SDVO multiplier set to 2x in this register and
- * through an SDVO command.
- *
- * This register field has values of multiplication factor minus 1, with
- * a maximum multiplier of 5 for SDVO.
- */
-# define DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
-# define DPLL_MD_UDI_MULTIPLIER_SHIFT		8
-/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
- * This best be set to the default value (3) or the CRT won't work. No,
- * I don't entirely understand what this does...
- */
-# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
-# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
-/** @} */
-
-#define DPLL_TEST		0x606c
-# define DPLLB_TEST_SDVO_DIV_1			(0 << 22)
-# define DPLLB_TEST_SDVO_DIV_2			(1 << 22)
-# define DPLLB_TEST_SDVO_DIV_4			(2 << 22)
-# define DPLLB_TEST_SDVO_DIV_MASK		(3 << 22)
-# define DPLLB_TEST_N_BYPASS			(1 << 19)
-# define DPLLB_TEST_M_BYPASS			(1 << 18)
-# define DPLLB_INPUT_BUFFER_ENABLE		(1 << 16)
-# define DPLLA_TEST_N_BYPASS			(1 << 3)
-# define DPLLA_TEST_M_BYPASS			(1 << 2)
-# define DPLLA_INPUT_BUFFER_ENABLE		(1 << 0)
-
-#define ADPA			0x61100
-#define ADPA_DAC_ENABLE		(1<<31)
-#define ADPA_DAC_DISABLE	0
-#define ADPA_PIPE_SELECT_MASK	(1<<30)
-#define ADPA_PIPE_A_SELECT	0
-#define ADPA_PIPE_B_SELECT	(1<<30)
-#define ADPA_USE_VGA_HVPOLARITY (1<<15)
-#define ADPA_SETS_HVPOLARITY	0
-#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
-#define ADPA_VSYNC_CNTL_ENABLE	0
-#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
-#define ADPA_HSYNC_CNTL_ENABLE	0
-#define ADPA_VSYNC_ACTIVE_HIGH	(1<<4)
-#define ADPA_VSYNC_ACTIVE_LOW	0
-#define ADPA_HSYNC_ACTIVE_HIGH	(1<<3)
-#define ADPA_HSYNC_ACTIVE_LOW	0
-
-#define FPA0		0x06040
-#define FPA1		0x06044
-#define FPB0		0x06048
-#define FPB1		0x0604c
-# define FP_N_DIV_MASK				0x003f0000
-# define FP_N_DIV_SHIFT				16
-# define FP_M1_DIV_MASK				0x00003f00
-# define FP_M1_DIV_SHIFT			8
-# define FP_M2_DIV_MASK				0x0000003f
-# define FP_M2_DIV_SHIFT			0
-
-
-#define PORT_HOTPLUG_EN		0x61110
-# define SDVOB_HOTPLUG_INT_EN			(1 << 26)
-# define SDVOC_HOTPLUG_INT_EN			(1 << 25)
-# define TV_HOTPLUG_INT_EN			(1 << 18)
-# define CRT_HOTPLUG_INT_EN			(1 << 9)
-# define CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
-
-#define PORT_HOTPLUG_STAT	0x61114
-# define CRT_HOTPLUG_INT_STATUS			(1 << 11)
-# define TV_HOTPLUG_INT_STATUS			(1 << 10)
-# define CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
-# define CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
-# define CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
-# define CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
-# define SDVOC_HOTPLUG_INT_STATUS		(1 << 7)
-# define SDVOB_HOTPLUG_INT_STATUS		(1 << 6)
-
-#define SDVOB			0x61140
-#define SDVOC			0x61160
-#define SDVO_ENABLE				(1 << 31)
-#define SDVO_PIPE_B_SELECT			(1 << 30)
-#define SDVO_STALL_SELECT			(1 << 29)
-#define SDVO_INTERRUPT_ENABLE			(1 << 26)
 /**
- * 915G/GM SDVO pixel multiplier.
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
  *
- * Programmed value is multiplier - 1, up to 5x.
- *
- * \sa DPLL_MD_UDI_MULTIPLIER_MASK
- */
-#define SDVO_PORT_MULTIPLY_MASK			(7 << 23)
-#define SDVO_PORT_MULTIPLY_SHIFT		23
-#define SDVO_PHASE_SELECT_MASK			(15 << 19)
-#define SDVO_PHASE_SELECT_DEFAULT		(6 << 19)
-#define SDVO_CLOCK_OUTPUT_INVERT		(1 << 18)
-#define SDVOC_GANG_MODE				(1 << 16)
-#define SDVO_BORDER_ENABLE			(1 << 7)
-#define SDVOB_PCIE_CONCURRENCY			(1 << 3)
-#define SDVO_DETECTED				(1 << 2)
-/* Bits to be preserved when writing */
-#define SDVOB_PRESERVE_MASK			((1 << 17) | (1 << 16) | (1 << 14))
-#define SDVOC_PRESERVE_MASK			(1 << 17)
-
-/** @defgroup LVDS
- * @{
- */
-/**
- * This register controls the LVDS output enable, pipe selection, and data
- * format selection.
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
  *
- * All of the clock/data pairs are force powered down by power sequencing.
- */
-#define LVDS			0x61180
-/**
- * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
- * the DPLL semantics change when the LVDS is assigned to that pipe.
- */
-# define LVDS_PORT_EN			(1 << 31)
-/** Selects pipe B for LVDS data.  Must be set on pre-965. */
-# define LVDS_PIPEB_SELECT		(1 << 30)
-
-/**
- * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
- * pixel.
- */
-# define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
-# define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
-# define LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
-/**
- * Controls the A3 data pair, which contains the additional LSBs for 24 bit
- * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
- * on.
- */
-# define LVDS_A3_POWER_MASK		(3 << 6)
-# define LVDS_A3_POWER_DOWN		(0 << 6)
-# define LVDS_A3_POWER_UP		(3 << 6)
-/**
- * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
- * is set.
- */
-# define LVDS_CLKB_POWER_MASK		(3 << 4)
-# define LVDS_CLKB_POWER_DOWN		(0 << 4)
-# define LVDS_CLKB_POWER_UP		(3 << 4)
-
-/**
- * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
- * setting for whether we are in dual-channel mode.  The B3 pair will
- * additionally only be powered up when LVDS_A3_POWER_UP is set.
- */
-# define LVDS_B0B3_POWER_MASK		(3 << 2)
-# define LVDS_B0B3_POWER_DOWN		(0 << 2)
-# define LVDS_B0B3_POWER_UP		(3 << 2)
-
-#define PIPEACONF 0x70008
-#define PIPEACONF_ENABLE	(1<<31)
-#define PIPEACONF_DISABLE	0
-#define PIPEACONF_DOUBLE_WIDE	(1<<30)
-#define I965_PIPECONF_ACTIVE	(1<<30)
-#define PIPEACONF_SINGLE_WIDE	0
-#define PIPEACONF_PIPE_UNLOCKED 0
-#define PIPEACONF_PIPE_LOCKED	(1<<25)
-#define PIPEACONF_PALETTE	0
-#define PIPEACONF_GAMMA		(1<<24)
-#define PIPECONF_FORCE_BORDER	(1<<25)
-#define PIPECONF_PROGRESSIVE	(0 << 21)
-#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
-#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
-
-#define DSPARB	  0x70030
-#define DSPARB_CSTART_MASK	(0x7f << 7)
-#define DSPARB_CSTART_SHIFT	7
-#define DSPARB_BSTART_MASK	(0x7f)		 
-#define DSPARB_BSTART_SHIFT	0
-
-#define PIPEBCONF 0x71008
-#define PIPEBCONF_ENABLE	(1<<31)
-#define PIPEBCONF_DISABLE	0
-#define PIPEBCONF_DOUBLE_WIDE	(1<<30)
-#define PIPEBCONF_DISABLE	0
-#define PIPEBCONF_GAMMA		(1<<24)
-#define PIPEBCONF_PALETTE	0
-
-#define PIPEBGCMAXRED		0x71010
-#define PIPEBGCMAXGREEN		0x71014
-#define PIPEBGCMAXBLUE		0x71018
-#define PIPEBSTAT		0x71024
-#define PIPEBFRAMEHIGH		0x71040
-#define PIPEBFRAMEPIXEL		0x71044
-
-#define DSPACNTR		0x70180
-#define DSPBCNTR		0x71180
-#define DISPLAY_PLANE_ENABLE			(1<<31)
-#define DISPLAY_PLANE_DISABLE			0
-#define DISPPLANE_GAMMA_ENABLE			(1<<30)
-#define DISPPLANE_GAMMA_DISABLE			0
-#define DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
-#define DISPPLANE_8BPP				(0x2<<26)
-#define DISPPLANE_15_16BPP			(0x4<<26)
-#define DISPPLANE_16BPP				(0x5<<26)
-#define DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
-#define DISPPLANE_32BPP				(0x7<<26)
-#define DISPPLANE_STEREO_ENABLE			(1<<25)
-#define DISPPLANE_STEREO_DISABLE		0
-#define DISPPLANE_SEL_PIPE_MASK			(1<<24)
-#define DISPPLANE_SEL_PIPE_A			0
-#define DISPPLANE_SEL_PIPE_B			(1<<24)
-#define DISPPLANE_SRC_KEY_ENABLE		(1<<22)
-#define DISPPLANE_SRC_KEY_DISABLE		0
-#define DISPPLANE_LINE_DOUBLE			(1<<20)
-#define DISPPLANE_NO_LINE_DOUBLE		0
-#define DISPPLANE_STEREO_POLARITY_FIRST		0
-#define DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
-/* plane B only */
-#define DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
-#define DISPPLANE_ALPHA_TRANS_DISABLE		0
-#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
-#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
-
-#define DSPABASE		0x70184
-#define DSPASTRIDE		0x70188
-
-#define DSPBBASE		0x71184
-#define DSPBADDR		DSPBBASE
-#define DSPBSTRIDE		0x71188
-
-#define DSPAKEYVAL		0x70194
-#define DSPAKEYMASK		0x70198
-
-#define DSPAPOS			0x7018C /* reserved */
-#define DSPASIZE		0x70190
-#define DSPBPOS			0x7118C
-#define DSPBSIZE		0x71190
-
-#define DSPASURF		0x7019C
-#define DSPATILEOFF		0x701A4
-
-#define DSPBSURF		0x7119C
-#define DSPBTILEOFF		0x711A4
-
-#define VGACNTRL		0x71400
-# define VGA_DISP_DISABLE			(1 << 31)
-# define VGA_2X_MODE				(1 << 30)
-# define VGA_PIPE_B_SELECT			(1 << 29)
-
-/*
- * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
- * of video memory available to the BIOS in SWF1.
- */
-
-#define SWF0			0x71410
-
-/*
- * 855 scratch registers.
- */
-#define SWF10			0x70410
-
-#define SWF30			0x72414
-
-/*
- * Overlay registers.  These are overlay registers accessed via MMIO.
- * Those loaded via the overlay register page are defined in i830_video.c.
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
-#define OVADD			0x30000
-
-#define DOVSTA			0x30008
-#define OC_BUF			(0x3<<20)
+#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5)
+#define I915_GEM_HWS_INDEX		0x20
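
A minimal usage sketch (illustrative, not part of this patch): both the legacy
breadcrumb and the new GEM sequence number are read with the same volatile,
indexed load from the mapped status page.

	/* Fragment only; assumes dev_priv->hw_status_page is already mapped. */
	uint32_t breadcrumb = READ_BREADCRUMB(dev_priv);          /* dword 5 */
	uint32_t seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); /* dword 0x20 */
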
 
-#define OGAMC5			0x30010
-#define OGAMC4			0x30014
-#define OGAMC3			0x30018
-#define OGAMC2			0x3001c
-#define OGAMC1			0x30020
-#define OGAMC0			0x30024
-/*
- * Palette registers
- */
-#define PALETTE_A		0x0a000
-#define PALETTE_B		0x0a800
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
@@ -1119,7 +639,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
 
-#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
 
 #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
 		     (dev)->pci_device == 0x2E12 || \
@@ -1133,9 +653,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-			IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
+			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
 
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev))
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
new file mode 100644
index 000000000000..9ac73dd1b422
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -0,0 +1,2558 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include <linux/swap.h>
+
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+			    uint32_t read_domains,
+			    uint32_t write_domain);
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+				 uint64_t offset,
+				 uint64_t size,
+				 uint32_t read_domains,
+				 uint32_t write_domain);
+static int
+i915_gem_set_domain(struct drm_gem_object *obj,
+		    struct drm_file *file_priv,
+		    uint32_t read_domains,
+		    uint32_t write_domain);
+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_init *args = data;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (args->gtt_start >= args->gtt_end ||
+	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
+	    args->gtt_end - args->gtt_start);
+
+	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_create *args = data;
+	struct drm_gem_object *obj;
+	int handle, ret;
+
+	args->size = roundup(args->size, PAGE_SIZE);
+
+	/* Allocate the new object */
+	obj = drm_gem_object_alloc(dev, args->size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+
+	return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pread *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	ssize_t read;
+	loff_t offset;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+	obj_priv = obj->driver_private;
+
+	/* Bounds check source.
+	 *
+	 * XXX: This could use review for overflow issues...
+	 */
+	if (args->offset > obj->size || args->size > obj->size ||
+	    args->offset + args->size > obj->size) {
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
+					       I915_GEM_DOMAIN_CPU, 0);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	offset = args->offset;
+
+	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+			args->size, &offset);
+	if (read != args->size) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		if (read < 0)
+			return read;
+		else
+			return -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
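
On the XXX note above: the final addition can only wrap if args->offset or
args->size already exceeds obj->size, and both of those cases are rejected by
the first two comparisons, so the check holds as written. A sketch of an
equivalent form that avoids the wrapping addition altogether (illustrative
only; the identical check in i915_gem_pwrite_ioctl below has the same shape):

	/* Once offset is known to be within the object, comparing size
	 * against the remaining space needs no addition at all. */
	if (args->offset > obj->size ||
	    args->size > obj->size - args->offset)
		return -EINVAL;
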
+
+static int
+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+		    struct drm_i915_gem_pwrite *args,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	ssize_t remain;
+	loff_t offset;
+	char __user *user_data;
+	char __iomem *vaddr;
+	char *vaddr_atomic;
+	int i, o, l;
+	int ret = 0;
+	unsigned long pfn;
+	unsigned long unwritten;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
+	if (!access_ok(VERIFY_READ, user_data, remain))
+		return -EFAULT;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(obj, 0);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	ret = i915_gem_set_domain(obj, file_priv,
+				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	if (ret)
+		goto fail;
+
+	obj_priv = obj->driver_private;
+	offset = obj_priv->gtt_offset + args->offset;
+	obj_priv->dirty = 1;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * i = page number
+		 * o = offset within page
+		 * l = bytes to copy
+		 */
+		i = offset >> PAGE_SHIFT;
+		o = offset & (PAGE_SIZE-1);
+		l = remain;
+		if ((o + l) > PAGE_SIZE)
+			l = PAGE_SIZE - o;
+
+		pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+#ifdef CONFIG_HIGHMEM
+		/* This is a workaround for the low performance of iounmap
+		 * (approximately 10% CPU cost on normal 3D workloads).
+		 * kmap_atomic on HIGHMEM kernels happens to let us map card
+		 * memory without taking IPIs.  When the vmap rework lands
+		 * we should be able to dump this hack.
+		 */
+		vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
+#if WATCH_PWRITE
+		DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+			 i, o, l, pfn, vaddr_atomic);
+#endif
+		unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
+							      user_data, l);
+		kunmap_atomic(vaddr_atomic, KM_USER0);
+
+		if (unwritten)
+#endif /* CONFIG_HIGHMEM */
+		{
+			vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+#if WATCH_PWRITE
+			DRM_INFO("pwrite slow i %d o %d l %d "
+				 "pfn %ld vaddr %p\n",
+				 i, o, l, pfn, vaddr);
+#endif
+			if (vaddr == NULL) {
+				ret = -EFAULT;
+				goto fail;
+			}
+			unwritten = __copy_from_user(vaddr + o, user_data, l);
+#if WATCH_PWRITE
+			DRM_INFO("unwritten %ld\n", unwritten);
+#endif
+			iounmap(vaddr);
+			if (unwritten) {
+				ret = -EFAULT;
+				goto fail;
+			}
+		}
+
+		remain -= l;
+		user_data += l;
+		offset += l;
+	}
+#if WATCH_PWRITE && 1
+	i915_gem_clflush_object(obj);
+	i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
+	i915_gem_clflush_object(obj);
+#endif
+
+fail:
+	i915_gem_object_unpin(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
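
A worked example of the page-split arithmetic in the loop above (illustrative,
assuming 4KB pages): a 0x40-byte write landing at GTT offset 0x1ff0 straddles
a page boundary and is carved into two passes:

	pass 1: i = 1, o = 0xff0, l = 0x10	(clipped to the end of the page)
	pass 2: i = 2, o = 0x000, l = 0x30	(the remainder)

so each kmap_atomic_pfn/ioremap_wc mapping only ever covers a single page.
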
+
+static int
+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+		      struct drm_i915_gem_pwrite *args,
+		      struct drm_file *file_priv)
+{
+	int ret;
+	loff_t offset;
+	ssize_t written;
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_set_domain(obj, file_priv,
+				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	offset = args->offset;
+
+	written = vfs_write(obj->filp,
+			    (char __user *)(uintptr_t) args->data_ptr,
+			    args->size, &offset);
+	if (written != args->size) {
+		mutex_unlock(&dev->struct_mutex);
+		if (written < 0)
+			return written;
+		else
+			return -EINVAL;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pwrite *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+	obj_priv = obj->driver_private;
+
+	/* Bounds check destination.
+	 *
+	 * XXX: This could use review for overflow issues...
+	 */
+	if (args->offset > obj->size || args->size > obj->size ||
+	    args->offset + args->size > obj->size) {
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+
+	/* We can only do the GTT pwrite on untiled buffers, as otherwise
+	 * it would end up going through the fenced access, and we'll get
+	 * different detiling behavior between reading and writing.
+	 * pread/pwrite currently are reading and writing from the CPU
+	 * perspective, requiring manual detiling by the client.
+	 */
+	if (obj_priv->tiling_mode == I915_TILING_NONE &&
+	    dev->gtt_total != 0)
+		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+	else
+		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+
+#if WATCH_PWRITE
+	if (ret)
+		DRM_INFO("pwrite failed %d\n", ret);
+#endif
+
+	drm_gem_object_unreference(obj);
+
+	return ret;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_set_domain *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	mutex_lock(&dev->struct_mutex);
+#if WATCH_BUF
+	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+		 obj, obj->size, args->read_domains, args->write_domain);
+#endif
+	ret = i915_gem_set_domain(obj, file_priv,
+				  args->read_domains, args->write_domain);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_sw_finish *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+
+#if WATCH_BUF
+	DRM_INFO("%s: sw_finish %d (%p %d)\n",
+		 __func__, args->handle, obj, obj->size);
+#endif
+	obj_priv = obj->driver_private;
+
+	/* Pinned buffers may be scanout, so flush the cache */
+	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+	}
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	loff_t offset;
+	unsigned long addr;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	offset = args->offset;
+
+	down_write(&current->mm->mmap_sem);
+	addr = do_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+	up_write(&current->mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR((void *)addr))
+		return addr;
+
+	args->addr_ptr = (uint64_t) addr;
+
+	return 0;
+}
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size / PAGE_SIZE;
+	int i;
+
+	if (obj_priv->page_list == NULL)
+		return;
+
+	for (i = 0; i < page_count; i++)
+		if (obj_priv->page_list[i] != NULL) {
+			if (obj_priv->dirty)
+				set_page_dirty(obj_priv->page_list[i]);
+			mark_page_accessed(obj_priv->page_list[i]);
+			page_cache_release(obj_priv->page_list[i]);
+		}
+	obj_priv->dirty = 0;
+
+	drm_free(obj_priv->page_list,
+		 page_count * sizeof(struct page *),
+		 DRM_MEM_DRIVER);
+	obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	/* Add a reference if we're newly entering the active list. */
+	if (!obj_priv->active) {
+		drm_gem_object_reference(obj);
+		obj_priv->active = 1;
+	}
+	/* Move from whatever list we were on to the tail of execution. */
+	list_move_tail(&obj_priv->list,
+		       &dev_priv->mm.active_list);
+}
+
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+	if (obj_priv->pin_count != 0)
+		list_del_init(&obj_priv->list);
+	else
+		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+	if (obj_priv->active) {
+		obj_priv->active = 0;
+		drm_gem_object_unreference(obj);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_mutex held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *request;
+	uint32_t seqno;
+	int was_empty;
+	RING_LOCALS;
+
+	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+	if (request == NULL)
+		return 0;
+
+	/* Grab the seqno we're going to make this request be, and bump the
+	 * next (skipping 0 so it can be the reserved no-seqno value).
+	 */
+	seqno = dev_priv->mm.next_gem_seqno;
+	dev_priv->mm.next_gem_seqno++;
+	if (dev_priv->mm.next_gem_seqno == 0)
+		dev_priv->mm.next_gem_seqno++;
+
+	BEGIN_LP_RING(4);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(seqno);
+
+	OUT_RING(MI_USER_INTERRUPT);
+	ADVANCE_LP_RING();
+
+	DRM_DEBUG("%d\n", seqno);
+
+	request->seqno = seqno;
+	request->emitted_jiffies = jiffies;
+	request->flush_domains = flush_domains;
+	was_empty = list_empty(&dev_priv->mm.request_list);
+	list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+	if (was_empty && !dev_priv->mm.suspended)
+		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+	return seqno;
+}
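
The seqno bookkeeping above, pulled out as a standalone sketch (illustrative;
it assumes the counter is initialized to 1 elsewhere, consistent with the doc
comment's promise that returned seqnos are nonzero, so zero stays reserved as
the "no seqno" value even across 32-bit wraparound):

	static uint32_t next_seqno_sketch(uint32_t *counter)
	{
		uint32_t seqno = (*counter)++;	/* hand out the current value */
		if (*counter == 0)		/* skip 0 when the counter wraps */
			(*counter)++;
		return seqno;
	}
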
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+static uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+	uint32_t flush_domains = 0;
+	RING_LOCALS;
+
+	/* The sampler always gets flushed on i965 (sigh) */
+	if (IS_I965G(dev))
+		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+	BEGIN_LP_RING(2);
+	OUT_RING(cmd);
+	OUT_RING(0); /* noop */
+	ADVANCE_LP_RING();
+	return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+			struct drm_i915_gem_request *request)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&dev_priv->mm.active_list)) {
+		struct drm_gem_object *obj;
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+					    struct drm_i915_gem_object,
+					    list);
+		obj = obj_priv->obj;
+
+		/* If the seqno being retired doesn't match the oldest in the
+		 * list, then the oldest in the list must still be newer than
+		 * this seqno.
+		 */
+		if (obj_priv->last_rendering_seqno != request->seqno)
+			return;
+#if WATCH_LRU
+		DRM_INFO("%s: retire %d moves to inactive list %p\n",
+			 __func__, request->seqno, obj);
+#endif
+
+		if (obj->write_domain != 0) {
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.flushing_list);
+		} else {
+			i915_gem_object_move_to_inactive(obj);
+		}
+	}
+
+	if (request->flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		/* Clear the write domain and activity from any buffers
+		 * that are just waiting for a flush matching the one retired.
+		 */
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if (obj->write_domain & request->flush_domains) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_inactive(obj);
+			}
+		}
+
+	}
+}
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
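
The signed cast is what makes this comparison safe across 32-bit wraparound.
A standalone, compilable demonstration (illustrative only; the trick is valid
as long as the two seqnos are within 2^31 of each other):

	#include <assert.h>
	#include <stdint.h>

	static int seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(6, 5));		/* ordinary case */
		assert(seqno_passed(5, 5));		/* equal counts as passed */
		assert(seqno_passed(5, 0xfffffff0u));	/* 5 has wrapped past 0xfffffff0 */
		assert(!seqno_passed(0xfffffff0u, 5));	/* ...but not the reverse */
		return 0;
	}
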
+
+uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t seqno;
+
+	seqno = i915_get_gem_seqno(dev);
+
+	while (!list_empty(&dev_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+		uint32_t retiring_seqno;
+
+		request = list_first_entry(&dev_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   list);
+		retiring_seqno = request->seqno;
+
+		if (i915_seqno_passed(seqno, retiring_seqno) ||
+		    dev_priv->mm.wedged) {
+			i915_gem_retire_request(dev, request);
+
+			list_del(&request->list);
+			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+		} else
+			break;
+	}
+}
+
+void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.retire_work.work);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_retire_requests(dev);
+	if (!dev_priv->mm.suspended &&
+	    !list_empty(&dev_priv->mm.request_list))
+		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	BUG_ON(seqno == 0);
+
+	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+		dev_priv->mm.waiting_gem_seqno = seqno;
+		i915_user_irq_get(dev);
+		ret = wait_event_interruptible(dev_priv->irq_queue,
+					       i915_seqno_passed(i915_get_gem_seqno(dev),
+								 seqno) ||
+					       dev_priv->mm.wedged);
+		i915_user_irq_put(dev);
+		dev_priv->mm.waiting_gem_seqno = 0;
+	}
+	if (dev_priv->mm.wedged)
+		ret = -EIO;
+
+	if (ret && ret != -ERESTARTSYS)
+		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+	/* Directly dispatch request retiring.  While we have the work queue
+	 * to handle this, the waiter on a request often wants an associated
+	 * buffer to have made it to the inactive list, and we would need
+	 * a separate wait queue to handle that.
+	 */
+	if (ret == 0)
+		i915_gem_retire_requests(dev);
+
+	return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t cmd;
+	RING_LOCALS;
+
+#if WATCH_EXEC
+	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+		  invalidate_domains, flush_domains);
+#endif
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+
+	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
+						     I915_GEM_DOMAIN_GTT)) {
+		/*
+		 * read/write caches:
+		 *
+		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+		 * also flushed at 2d versus 3d pipeline switches.
+		 *
+		 * read-only caches:
+		 *
+		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+		 * MI_READ_FLUSH is set, and is always flushed on 965.
+		 *
+		 * I915_GEM_DOMAIN_COMMAND may not exist?
+		 *
+		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+		 * invalidated when MI_EXE_FLUSH is set.
+		 *
+		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+		 * invalidated with every MI_FLUSH.
+		 *
+		 * TLBs:
+		 *
+		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
+		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+		 * are flushed at any MI_FLUSH.
+		 */
+
+		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+		if ((invalidate_domains|flush_domains) &
+		    I915_GEM_DOMAIN_RENDER)
+			cmd &= ~MI_NO_WRITE_FLUSH;
+		if (!IS_I965G(dev)) {
+			/*
+			 * On the 965, the sampler cache always gets flushed
+			 * and this bit is reserved.
+			 */
+			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+				cmd |= MI_READ_FLUSH;
+		}
+		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+			cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+		BEGIN_LP_RING(2);
+		OUT_RING(cmd);
+		OUT_RING(0); /* noop */
+		ADVANCE_LP_RING();
+	}
+}
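
To make the cmd-word selection above concrete, here it is pulled out on its
own (a simplified sketch: the real code bases the RENDER test on the union of
invalidate and flush domains and the SAMPLER test on invalidations only; this
collapses both into a single mask):

	static uint32_t flush_cmd_sketch(uint32_t domains, int is_i965)
	{
		uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; /* writes masked by default */

		if (domains & I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;	/* let the render cache write out */
		if (!is_i965 && (domains & I915_GEM_DOMAIN_SAMPLER))
			cmd |= MI_READ_FLUSH;		/* 965 flushes the sampler anyway */
		if (domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;
		return cmd;
	}
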
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* If there are writes queued to the buffer, flush and
+	 * create a new seqno to wait for.
+	 */
+	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
+		uint32_t write_domain = obj->write_domain;
+#if WATCH_BUF
+		DRM_INFO("%s: flushing object %p from write domain %08x\n",
+			  __func__, obj, write_domain);
+#endif
+		i915_gem_flush(dev, 0, write_domain);
+
+		i915_gem_object_move_to_active(obj);
+		obj_priv->last_rendering_seqno = i915_add_request(dev,
+								  write_domain);
+		BUG_ON(obj_priv->last_rendering_seqno == 0);
+#if WATCH_LRU
+		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+#endif
+	}
+
+	/* If there is rendering queued on the buffer being evicted, wait for
+	 * it.
+	 */
+	if (obj_priv->active) {
+#if WATCH_BUF
+		DRM_INFO("%s: object %p wait for seqno %08x\n",
+			  __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+static int
+i915_gem_object_unbind(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret = 0;
+
+#if WATCH_BUF
+	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+#endif
+	if (obj_priv->gtt_space == NULL)
+		return 0;
+
+	if (obj_priv->pin_count != 0) {
+		DRM_ERROR("Attempting to unbind pinned buffer\n");
+		return -EINVAL;
+	}
+
+	/* Wait for any rendering to complete
+	 */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret) {
+		DRM_ERROR("wait_rendering failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Move the object to the CPU domain to ensure that
+	 * any possible CPU writes while it's not in the GTT
+	 * are flushed when we go to remap it. This will
+	 * also ensure that all pending GPU writes are finished
+	 * before we unbind.
+	 */
+	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+					 I915_GEM_DOMAIN_CPU);
+	if (ret) {
+		DRM_ERROR("set_domain failed: %d\n", ret);
+		return ret;
+	}
+
+	if (obj_priv->agp_mem != NULL) {
+		drm_unbind_agp(obj_priv->agp_mem);
+		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+		obj_priv->agp_mem = NULL;
+	}
+
+	BUG_ON(obj_priv->active);
+
+	i915_gem_object_free_page_list(obj);
+
+	if (obj_priv->gtt_space) {
+		atomic_dec(&dev->gtt_count);
+		atomic_sub(obj->size, &dev->gtt_memory);
+
+		drm_mm_put_block(obj_priv->gtt_space);
+		obj_priv->gtt_space = NULL;
+	}
+
+	/* Remove ourselves from the LRU list if present. */
+	if (!list_empty(&obj_priv->list))
+		list_del_init(&obj_priv->list);
+
+	return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+
+	for (;;) {
+		/* If there's an inactive buffer available now, grab it
+		 * and be done.
+		 */
+		if (!list_empty(&dev_priv->mm.inactive_list)) {
+			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+						    struct drm_i915_gem_object,
+						    list);
+			obj = obj_priv->obj;
+			BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+			DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+			BUG_ON(obj_priv->active);
+
+			/* Wait on the rendering and unbind the buffer. */
+			ret = i915_gem_object_unbind(obj);
+			break;
+		}
+
+		/* If we didn't get anything, but the ring is still processing
+		 * things, wait for one of those things to finish and hopefully
+		 * leave us a buffer to evict.
+		 */
+		if (!list_empty(&dev_priv->mm.request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&dev_priv->mm.request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_wait_request(dev, request->seqno);
+			if (ret)
+				break;
+
+			/* if waiting caused an object to become inactive,
+			 * then loop around and grab it. Otherwise, we
+			 * assume that waiting freed and unbound something,
+			 * so there should now be some space in the GTT
+			 */
+			if (!list_empty(&dev_priv->mm.inactive_list))
+				continue;
+			break;
+		}
+
+		/* If we didn't have anything on the request list but there
+		 * are buffers awaiting a flush, emit one and try again.
+		 * When we wait on it, those buffers waiting for that flush
+		 * will get moved to inactive.
+		 */
+		if (!list_empty(&dev_priv->mm.flushing_list)) {
+			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+						    struct drm_i915_gem_object,
+						    list);
+			obj = obj_priv->obj;
+
+			i915_gem_flush(dev,
+				       obj->write_domain,
+				       obj->write_domain);
+			i915_add_request(dev, obj->write_domain);
+
+			obj = NULL;
+			continue;
+		}
+
+		DRM_ERROR("inactive empty %d request empty %d "
+			  "flushing empty %d\n",
+			  list_empty(&dev_priv->mm.inactive_list),
+			  list_empty(&dev_priv->mm.request_list),
+			  list_empty(&dev_priv->mm.flushing_list));
+		/* If we didn't do any of the above, there's nothing to be done
+		 * and we just can't fit it in.
+		 */
+		return -ENOMEM;
+	}
+	return ret;
+}
+
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count, i;
+	struct address_space *mapping;
+	struct inode *inode;
+	struct page *page;
+	int ret;
+
+	if (obj_priv->page_list)
+		return 0;
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 */
+	page_count = obj->size / PAGE_SIZE;
+	BUG_ON(obj_priv->page_list != NULL);
+	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+					 DRM_MEM_DRIVER);
+	if (obj_priv->page_list == NULL) {
+		DRM_ERROR("Faled to allocate page list\n");
+		return -ENOMEM;
+	}
+
+	inode = obj->filp->f_path.dentry->d_inode;
+	mapping = inode->i_mapping;
+	for (i = 0; i < page_count; i++) {
+		page = read_mapping_page(mapping, i, NULL);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			DRM_ERROR("read_mapping_page failed: %d\n", ret);
+			i915_gem_object_free_page_list(obj);
+			return ret;
+		}
+		obj_priv->page_list[i] = page;
+	}
+	return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_mm_node *free_space;
+	int page_count, ret;
+
+	if (alignment == 0)
+		alignment = PAGE_SIZE;
+	if (alignment & (PAGE_SIZE - 1)) {
+		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+		return -EINVAL;
+	}
+
+ search_free:
+	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+					obj->size, alignment, 0);
+	if (free_space != NULL) {
+		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
+						       alignment);
+		if (obj_priv->gtt_space != NULL) {
+			obj_priv->gtt_space->private = obj;
+			obj_priv->gtt_offset = obj_priv->gtt_space->start;
+		}
+	}
+	if (obj_priv->gtt_space == NULL) {
+		/* If the gtt is empty and we're still having trouble
+		 * fitting our object in, we're out of memory.
+		 */
+#if WATCH_LRU
+		DRM_INFO("%s: GTT full, evicting something\n", __func__);
+#endif
+		if (list_empty(&dev_priv->mm.inactive_list) &&
+		    list_empty(&dev_priv->mm.flushing_list) &&
+		    list_empty(&dev_priv->mm.active_list)) {
+			DRM_ERROR("GTT full, but LRU list empty\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0) {
+			DRM_ERROR("Failed to evict a buffer %d\n", ret);
+			return ret;
+		}
+		goto search_free;
+	}
+
+#if WATCH_BUF
+	DRM_INFO("Binding object of size %d at 0x%08x\n",
+		 obj->size, obj_priv->gtt_offset);
+#endif
+	ret = i915_gem_object_get_page_list(obj);
+	if (ret) {
+		drm_mm_put_block(obj_priv->gtt_space);
+		obj_priv->gtt_space = NULL;
+		return ret;
+	}
+
+	page_count = obj->size / PAGE_SIZE;
+	/* Create an AGP memory structure pointing at our pages, and bind it
+	 * into the GTT.
+	 */
+	obj_priv->agp_mem = drm_agp_bind_pages(dev,
+					       obj_priv->page_list,
+					       page_count,
+					       obj_priv->gtt_offset,
+					       obj_priv->agp_type);
+	if (obj_priv->agp_mem == NULL) {
+		i915_gem_object_free_page_list(obj);
+		drm_mm_put_block(obj_priv->gtt_space);
+		obj_priv->gtt_space = NULL;
+		return -ENOMEM;
+	}
+	atomic_inc(&dev->gtt_count);
+	atomic_add(obj->size, &dev->gtt_memory);
+
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+	return 0;
+}
+
+void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
+
+	/* If we don't have a page list set up, then we're not pinned
+	 * to GPU, and we can ignore the cache flush because it'll happen
+	 * again at bind time.
+	 */
+	if (obj_priv->page_list == NULL)
+		return;
+
+	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Mapped to GTT
+ *	4. Read by GPU
+ *	5. Unmapped from GTT
+ *	6. Freed
+ *
+ *	Let's take these a step at a time
+ *
+ *	1. Allocated
+ *		Pages allocated from the kernel may still have
+ *		cache contents, so we set them to (CPU, CPU) always.
+ *	2. Written by CPU (using pwrite)
+ *		The pwrite function calls set_domain (CPU, CPU) and
+ *		this function does nothing (as nothing changes)
+ *	3. Mapped to GTT
+ *		This function asserts that the object is not
+ *		currently in any GPU-based read or write domains
+ *	4. Read by GPU
+ *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ *		As write_domain is zero, this function adds in the
+ *		current read domains (CPU+COMMAND, 0).
+ *		flush_domains is set to CPU.
+ *		invalidate_domains is set to COMMAND
+ *		clflush is run to get data out of the CPU caches
+ *		then i915_dev_set_domain calls i915_gem_flush to
+ *		emit an MI_FLUSH and drm_agp_chipset_flush
+ *	5. Unmapped from GTT
+ *		i915_gem_object_unbind calls set_domain (CPU, CPU)
+ *		flush_domains and invalidate_domains end up both zero
+ *		so no flushing/invalidating happens
+ *	6. Freed
+ *		yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ *	1. Allocated
+ *	2. Mapped to GTT
+ *	3. Read/written by GPU
+ *	4. set_domain to (CPU,CPU)
+ *	5. Read/written by CPU
+ *	6. Read/written by GPU
+ *
+ *	1. Allocated
+ *		Same as last example, (CPU, CPU)
+ *	2. Mapped to GTT
+ *		Nothing changes (assertions find that it is not in the GPU)
+ *	3. Read/written by GPU
+ *		execbuffer calls set_domain (RENDER, RENDER)
+ *		flush_domains gets CPU
+ *		invalidate_domains gets GPU
+ *		clflush (obj)
+ *		MI_FLUSH and drm_agp_chipset_flush
+ *	4. set_domain (CPU, CPU)
+ *		flush_domains gets GPU
+ *		invalidate_domains gets CPU
+ *		wait_rendering (obj) to make sure all drawing is complete.
+ *		This will include an MI_FLUSH to get the data from GPU
+ *		to memory
+ *		clflush (obj) to invalidate the CPU cache
+ *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ *	5. Read/written by CPU
+ *		cache lines are loaded and dirtied
+ *	6. Read/written by GPU
+ *		Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Read by GPU
+ *	4. Updated (written) by CPU again
+ *	5. Read by GPU
+ *
+ *	1. Allocated
+ *		(CPU, CPU)
+ *	2. Written by CPU
+ *		(CPU, CPU)
+ *	3. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ *	4. Updated (written) by CPU again
+ *		(CPU, CPU)
+ *		flush_domains = 0 (no previous write domain)
+ *		invalidate_domains = 0 (no new read domains)
+ *	5. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ */
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+			    uint32_t read_domains,
+			    uint32_t write_domain)
+{
+	struct drm_device		*dev = obj->dev;
+	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
+	uint32_t			invalidate_domains = 0;
+	uint32_t			flush_domains = 0;
+	int				ret;
+
+#if WATCH_BUF
+	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+		 __func__, obj,
+		 obj->read_domains, read_domains,
+		 obj->write_domain, write_domain);
+#endif
+	/*
+	 * If the object isn't moving to a new write domain,
+	 * let the object stay in multiple read domains
+	 */
+	if (write_domain == 0)
+		read_domains |= obj->read_domains;
+	else
+		obj_priv->dirty = 1;
+
+	/*
+	 * Flush the current write domain if
+	 * the new read domains don't match. Invalidate
+	 * any read domains which differ from the old
+	 * write domain
+	 */
+	if (obj->write_domain && obj->write_domain != read_domains) {
+		flush_domains |= obj->write_domain;
+		invalidate_domains |= read_domains & ~obj->write_domain;
+	}
+	/*
+	 * Invalidate any read caches which may have
+	 * stale data. That is, any new read domains.
+	 */
+	invalidate_domains |= read_domains & ~obj->read_domains;
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+			 __func__, flush_domains, invalidate_domains);
+#endif
+		/*
+		 * If we're invalidating the CPU cache and flushing a GPU cache,
+		 * then pause for rendering so that the GPU caches will be
+		 * flushed before the CPU cache is invalidated.
+		 */
+		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
+				       I915_GEM_DOMAIN_GTT))) {
+			ret = i915_gem_object_wait_rendering(obj);
+			if (ret)
+				return ret;
+		}
+		i915_gem_clflush_object(obj);
+	}
+
+	if ((write_domain | flush_domains) != 0)
+		obj->write_domain = write_domain;
+
+	/* If we're invalidating the CPU domain, clear the per-page CPU
+	 * domain list as well.
+	 */
+	if (obj_priv->page_cpu_valid != NULL &&
+	    (write_domain != 0 ||
+	     read_domains & I915_GEM_DOMAIN_CPU)) {
+		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+			 DRM_MEM_DRIVER);
+		obj_priv->page_cpu_valid = NULL;
+	}
+	obj->read_domains = read_domains;
+
+	dev->invalidate_domains |= invalidate_domains;
+	dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+		 __func__,
+		 obj->read_domains, obj->write_domain,
+		 dev->invalidate_domains, dev->flush_domains);
+#endif
+	return 0;
+}
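
Tracing "Case 1" step 4 from the comment block through this function shows
the bookkeeping concretely. For a batch buffer in state (CPU, CPU) entering
set_domain(COMMAND, 0):

	write_domain == 0		-> read_domains |= CPU, i.e. COMMAND|CPU
	obj->write_domain (CPU)
	  != read_domains		-> flush_domains |= CPU,
					   invalidate_domains |= COMMAND
	CPU is in the flush set		-> i915_gem_clflush_object(obj)

The domains accumulated in dev->flush_domains/invalidate_domains are then
turned into a single combined flush by i915_gem_dev_set_domain() below,
rather than one flush per buffer.
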
+
+/**
+ * Set the read/write domain on a range of the object.
+ *
+ * Currently only implemented for CPU reads, otherwise drops to normal
+ * i915_gem_object_set_domain().
+ */
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+				 uint64_t offset,
+				 uint64_t size,
+				 uint32_t read_domains,
+				 uint32_t write_domain)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret, i;
+
+	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
+		return 0;
+
+	if (read_domains != I915_GEM_DOMAIN_CPU ||
+	    write_domain != 0)
+		return i915_gem_object_set_domain(obj,
+						  read_domains, write_domain);
+
+	/* Wait on any GPU rendering to the object to be flushed. */
+	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
+		ret = i915_gem_object_wait_rendering(obj);
+		if (ret)
+			return ret;
+	}
+
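+	/* Lazily allocate the per-page validity map: one byte per page,
+	 * zeroed so every page starts out CPU-invalid until it is
+	 * clflushed below.
+	 */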
+	if (obj_priv->page_cpu_valid == NULL) {
+		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+						      DRM_MEM_DRIVER);
+	}
+
+	/* Flush the cache on any pages that are still invalid from the CPU's
+	 * perspective.
+	 */
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+		if (obj_priv->page_cpu_valid[i])
+			continue;
+
+		drm_clflush_pages(obj_priv->page_list + i, 1);
+
+		obj_priv->page_cpu_valid[i] = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+static uint32_t
+i915_gem_dev_set_domain(struct drm_device *dev)
+{
+	uint32_t flush_domains = dev->flush_domains;
+
+	/*
+	 * Now that all the buffers are synced to the proper domains,
+	 * flush and invalidate the collected domains
+	 */
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		dev->invalidate_domains = 0;
+		dev->flush_domains = 0;
+	}
+
+	return flush_domains;
+}
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+				 struct drm_file *file_priv,
+				 struct drm_i915_gem_exec_object *entry)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_relocation_entry __user *relocs;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+	uint32_t last_reloc_offset = -1;
+	void __iomem *reloc_page = NULL;
+
+	/* Choose the GTT offset for our buffer and put it there. */
+	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+	if (ret)
+		return ret;
+
+	entry->offset = obj_priv->gtt_offset;
+
+	relocs = (struct drm_i915_gem_relocation_entry __user *)
+		 (uintptr_t) entry->relocs_ptr;
+	/* Apply the relocations, using the GTT aperture to avoid cache
+	 * flushing requirements.
+	 */
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_gem_object *target_obj;
+		struct drm_i915_gem_object *target_obj_priv;
+		uint32_t reloc_val, reloc_offset;
+		uint32_t __iomem *reloc_entry;
+
+		/* copy_from_user() returns the number of bytes left
+		 * uncopied rather than an errno, so translate any
+		 * failure into -EFAULT.
+		 */
+		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+		if (ret != 0) {
+			i915_gem_object_unpin(obj);
+			return -EFAULT;
+		}
+
+		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+						   reloc.target_handle);
+		if (target_obj == NULL) {
+			i915_gem_object_unpin(obj);
+			return -EBADF;
+		}
+		target_obj_priv = target_obj->driver_private;
+
+		/* The target buffer should have appeared before us in the
+		 * exec_object list, so it should have a GTT space bound by now.
+		 */
+		if (target_obj_priv->gtt_space == NULL) {
+			DRM_ERROR("No GTT space found for object %d\n",
+				  reloc.target_handle);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+		if (reloc.offset > obj->size - 4) {
+			DRM_ERROR("Relocation beyond object bounds: "
+				  "obj %p target %d offset %d size %d.\n",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset, (int) obj->size);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+		if (reloc.offset & 3) {
+			DRM_ERROR("Relocation not 4-byte aligned: "
+				  "obj %p target %d offset %d.\n",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+		if (reloc.write_domain && target_obj->pending_write_domain &&
+		    reloc.write_domain != target_obj->pending_write_domain) {
+			DRM_ERROR("Write domain conflict: "
+				  "obj %p target %d offset %d "
+				  "new %08x old %08x\n",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.write_domain,
+				  target_obj->pending_write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+#if WATCH_RELOC
+		DRM_INFO("%s: obj %p offset %08x target %d "
+			 "read %08x write %08x gtt %08x "
+			 "presumed %08x delta %08x\n",
+			 __func__,
+			 obj,
+			 (int) reloc.offset,
+			 (int) reloc.target_handle,
+			 (int) reloc.read_domains,
+			 (int) reloc.write_domain,
+			 (int) target_obj_priv->gtt_offset,
+			 (int) reloc.presumed_offset,
+			 reloc.delta);
+#endif
+
+		target_obj->pending_read_domains |= reloc.read_domains;
+		target_obj->pending_write_domain |= reloc.write_domain;
+
+		/* If the relocation already has the right value in it, no
+		 * more work needs to be done.
+		 */
+		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+			drm_gem_object_unreference(target_obj);
+			continue;
+		}
+
+		/* Now that we're going to actually write some data in,
+		 * make sure that any rendering using this buffer's contents
+		 * is completed.
+		 */
+		ret = i915_gem_object_wait_rendering(obj);
+		if (ret) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+
+		/* As we're writing through the gtt, flush
+		 * any CPU writes before we write the relocations
+		 */
+		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+			i915_gem_clflush_object(obj);
+			drm_agp_chipset_flush(dev);
+			obj->write_domain = 0;
+		}
+
+		/* Map the page containing the relocation we're going to
+		 * perform.
+		 */
+		reloc_offset = obj_priv->gtt_offset + reloc.offset;
+		if (reloc_page == NULL ||
+		    (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+		    (reloc_offset & ~(PAGE_SIZE - 1))) {
+			if (reloc_page != NULL)
+				iounmap(reloc_page);
+
+			reloc_page = ioremap_wc(dev->agp->base +
+						(reloc_offset &
+						 ~(PAGE_SIZE - 1)),
+						PAGE_SIZE);
+			last_reloc_offset = reloc_offset;
+			if (reloc_page == NULL) {
+				drm_gem_object_unreference(target_obj);
+				i915_gem_object_unpin(obj);
+				return -ENOMEM;
+			}
+		}
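+		/* reloc_page now covers the GTT page containing this
+		 * relocation; the mapping is kept across loop iterations
+		 * so consecutive relocations within one page avoid
+		 * repeated ioremap/iounmap cycles.
+		 */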
+
+		reloc_entry = (uint32_t __iomem *)(reloc_page +
+					   (reloc_offset & (PAGE_SIZE - 1)));
+		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+#if WATCH_BUF
+		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+			  obj, (unsigned int) reloc.offset,
+			  readl(reloc_entry), reloc_val);
+#endif
+		writel(reloc_val, reloc_entry);
+
+		/* Write the updated presumed offset for this entry back out
+		 * to the user.
+		 */
+		reloc.presumed_offset = target_obj_priv->gtt_offset;
+		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EFAULT;
+		}
+
+		drm_gem_object_unreference(target_obj);
+	}
+
+	if (reloc_page != NULL)
+		iounmap(reloc_page);
+
+#if WATCH_BUF
+	if (0)
+		i915_gem_dump_object(obj, 128, __func__, ~0);
+#endif
+	return 0;
+}
+
+/**
+ * Dispatch a batchbuffer to the ring.
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+			      struct drm_i915_gem_execbuffer *exec,
+			      uint64_t exec_offset)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+					     (uintptr_t) exec->cliprects_ptr;
+	int nbox = exec->num_cliprects;
+	int i = 0, count;
+	uint32_t	exec_start, exec_len;
+	RING_LOCALS;
+
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	exec_len = (uint32_t) exec->batch_len;
+
+	if ((exec_start | exec_len) & 0x7) {
+		DRM_ERROR("alignment\n");
+		return -EINVAL;
+	}
+
+	if (!exec_start)
+		return -EINVAL;
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			int ret = i915_emit_box(dev, boxes, i,
+						exec->DR1, exec->DR4);
+			if (ret)
+				return ret;
+		}
+
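+		/* 830/845-class chips take an explicit start and end
+		 * address via MI_BATCH_BUFFER; everything newer branches
+		 * into the batch with MI_BATCH_BUFFER_START, with
+		 * 965-class parts carrying the non-secure bit in the
+		 * command dword rather than in the start address.
+		 */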
+		if (IS_I830(dev) || IS_845G(dev)) {
+			BEGIN_LP_RING(4);
+			OUT_RING(MI_BATCH_BUFFER);
+			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+			OUT_RING(exec_start + exec_len - 4);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		} else {
+			BEGIN_LP_RING(2);
+			if (IS_I965G(dev)) {
+				OUT_RING(MI_BATCH_BUFFER_START |
+					 (2 << 6) |
+					 MI_BATCH_NON_SECURE_I965);
+				OUT_RING(exec_start);
+			} else {
+				OUT_RING(MI_BATCH_BUFFER_START |
+					 (2 << 6));
+				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+			}
+			ADVANCE_LP_RING();
+		}
+	}
+
+	/* XXX breadcrumb */
+	return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	int ret = 0;
+	uint32_t seqno;
+
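+	/* Wait on the request that was newest at the previous throttle
+	 * call, then record the currently newest request for next time,
+	 * keeping roughly one throttle interval of work in flight.
+	 */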
+	mutex_lock(&dev->struct_mutex);
+	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+	i915_file_priv->mm.last_gem_throttle_seqno =
+		i915_file_priv->mm.last_gem_seqno;
+	if (seqno)
+		ret = i915_wait_request(dev, seqno);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_gem_object **object_list = NULL;
+	struct drm_gem_object *batch_obj;
+	int ret, i, pinned = 0;
+	uint64_t exec_offset;
+	uint32_t seqno, flush_domains;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+	/* Copy in the exec list from userland */
+	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+			       DRM_MEM_DRIVER);
+	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+				 DRM_MEM_DRIVER);
+	if (exec_list == NULL || object_list == NULL) {
+		DRM_ERROR("Failed to allocate exec or object list "
+			  "for %d buffers\n",
+			  args->buffer_count);
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_gem_exec_object __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		ret = -EFAULT;
+		goto pre_mutex_err;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/* Bail out through pre_mutex_err so the lists allocated above
+	 * are freed.
+	 */
+	if (dev_priv->mm.wedged) {
+		DRM_ERROR("Execbuf while wedged\n");
+		mutex_unlock(&dev->struct_mutex);
+		ret = -EIO;
+		goto pre_mutex_err;
+	}
+
+	if (dev_priv->mm.suspended) {
+		DRM_ERROR("Execbuf while VT-switched.\n");
+		mutex_unlock(&dev->struct_mutex);
+		ret = -EBUSY;
+		goto pre_mutex_err;
+	}
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as each object is bound to the
+	 * GTT.
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
+	/* Look up object handles and perform the relocations */
+	for (i = 0; i < args->buffer_count; i++) {
+		object_list[i] = drm_gem_object_lookup(dev, file_priv,
+						       exec_list[i].handle);
+		if (object_list[i] == NULL) {
+			DRM_ERROR("Invalid object handle %d at index %d\n",
+				   exec_list[i].handle, i);
+			ret = -EBADF;
+			goto err;
+		}
+
+		object_list[i]->pending_read_domains = 0;
+		object_list[i]->pending_write_domain = 0;
+		ret = i915_gem_object_pin_and_relocate(object_list[i],
+						       file_priv,
+						       &exec_list[i]);
+		if (ret) {
+			DRM_ERROR("object bind and relocate failed %d\n", ret);
+			goto err;
+		}
+		pinned = i + 1;
+	}
+
+	/* Set the pending read domains for the batch buffer to COMMAND */
+	batch_obj = object_list[args->buffer_count-1];
+	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+	batch_obj->pending_write_domain = 0;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		if (obj_priv->gtt_space == NULL) {
+			/* We evicted the buffer in the process of validating
+			 * our set of buffers.  We could try to recover by
+			 * kicking everything out and trying again from
+			 * the start.
+			 */
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/* make sure all previous memory operations have passed */
+		ret = i915_gem_object_set_domain(obj,
+						 obj->pending_read_domains,
+						 obj->pending_write_domain);
+		if (ret)
+			goto err;
+	}
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/* Flush/invalidate caches and chipset buffer */
+	flush_domains = i915_gem_dev_set_domain(dev);
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+#if WATCH_COHERENCY
+	for (i = 0; i < args->buffer_count; i++) {
+		i915_gem_object_check_coherency(object_list[i],
+						exec_list[i].handle);
+	}
+#endif
+
+	exec_offset = exec_list[args->buffer_count - 1].offset;
+
+#if WATCH_EXEC
+	i915_gem_dump_object(object_list[args->buffer_count - 1],
+			      args->batch_len,
+			      __func__,
+			      ~0);
+#endif
+
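+	/* Emit a request for the flush above (if any) so it is tracked
+	 * against its own seqno, separate from the request that will
+	 * cover the batch itself.
+	 */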
+	(void)i915_add_request(dev, flush_domains);
+
+	/* Exec the batchbuffer */
+	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+	if (ret) {
+		DRM_ERROR("dispatch failed %d\n", ret);
+		goto err;
+	}
+
+	/*
+	 * Ensure that the commands in the batch buffer are
+	 * finished before the interrupt fires
+	 */
+	flush_domains = i915_retire_commands(dev);
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/*
+	 * Get a seqno representing the execution of the current buffer,
+	 * which we can wait on.  We would like to mitigate these interrupts,
+	 * likely by only creating seqnos occasionally (so that we have
+	 * *some* interrupts representing completion of buffers that we can
+	 * wait on when trying to clear up gtt space).
+	 */
+	seqno = i915_add_request(dev, flush_domains);
+	BUG_ON(seqno == 0);
+	i915_file_priv->mm.last_gem_seqno = seqno;
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		i915_gem_object_move_to_active(obj);
+		obj_priv->last_rendering_seqno = seqno;
+#if WATCH_LRU
+		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+#endif
+	}
+#if WATCH_LRU
+	i915_dump_lru(dev, __func__);
+#endif
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/* Copy the new buffer offsets back to the user's exec list. */
+	ret = copy_to_user((struct drm_i915_gem_exec_object __user *)
+			   (uintptr_t) args->buffers_ptr,
+			   exec_list,
+			   sizeof(*exec_list) * args->buffer_count);
+	if (ret) {
+		DRM_ERROR("failed to copy %d exec entries "
+			  "back to user (%d)\n",
+			  args->buffer_count, ret);
+		ret = -EFAULT;
+	}
+err:
+	if (object_list != NULL) {
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		for (i = 0; i < args->buffer_count; i++)
+			drm_gem_object_unreference(object_list[i]);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+		 DRM_MEM_DRIVER);
+	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+		 DRM_MEM_DRIVER);
+
+	return ret;
+}
+
+int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+	if (obj_priv->gtt_space == NULL) {
+		ret = i915_gem_object_bind_to_gtt(obj, alignment);
+		if (ret != 0) {
+			DRM_ERROR("Failure to bind: %d\n", ret);
+			return ret;
+		}
+	}
+	obj_priv->pin_count++;
+
+	/* If the object is not active and not pending a flush,
+	 * remove it from the inactive list
+	 */
+	if (obj_priv->pin_count == 1) {
+		atomic_inc(&dev->pin_count);
+		atomic_add(obj->size, &dev->pin_memory);
+		if (!obj_priv->active &&
+		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+					   I915_GEM_DOMAIN_GTT)) == 0 &&
+		    !list_empty(&obj_priv->list))
+			list_del_init(&obj_priv->list);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+	obj_priv->pin_count--;
+	BUG_ON(obj_priv->pin_count < 0);
+	BUG_ON(obj_priv->gtt_space == NULL);
+
+	/* If the object is no longer pinned, and is
+	 * neither active nor being flushed, then stick it on
+	 * the inactive list
+	 */
+	if (obj_priv->pin_count == 0) {
+		if (!obj_priv->active &&
+		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+					   I915_GEM_DOMAIN_GTT)) == 0)
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.inactive_list);
+		atomic_dec(&dev->pin_count);
+		atomic_sub(obj->size, &dev->pin_memory);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pin *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+			  args->handle);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+	obj_priv = obj->driver_private;
+
+	ret = i915_gem_object_pin(obj, args->alignment);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* XXX - flush the CPU caches for pinned objects
+	 * as the X server doesn't manage domains yet
+	 */
+	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+		obj->write_domain = 0;
+	}
+	args->offset = obj_priv->gtt_offset;
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pin *args = data;
+	struct drm_gem_object *obj;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+			  args->handle);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+
+	i915_gem_object_unpin(obj);
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_busy *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+			  args->handle);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+
+	obj_priv = obj->driver_private;
+	args->busy = obj_priv->active;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return i915_gem_ring_throttle(dev, file_priv);
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv;
+
+	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+	if (obj_priv == NULL)
+		return -ENOMEM;
+
+	/*
+	 * We've just allocated pages from the kernel,
+	 * so they've just been written by the CPU with
+	 * zeros. They'll need to be clflushed before we
+	 * use them with the GPU.
+	 */
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+	obj_priv->agp_type = AGP_USER_MEMORY;
+
+	obj->driver_private = obj_priv;
+	obj_priv->obj = obj;
+	INIT_LIST_HEAD(&obj_priv->list);
+	return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
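+	/* Drop any pin references still held (e.g. if the pinning client
+	 * exited without unpinning) so the object can be unbound below.
+	 */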
+	while (obj_priv->pin_count > 0)
+		i915_gem_object_unpin(obj);
+
+	i915_gem_object_unbind(obj);
+
+	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+}
+
+static int
+i915_gem_set_domain(struct drm_gem_object *obj,
+		    struct drm_file *file_priv,
+		    uint32_t read_domains,
+		    uint32_t write_domain)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+	uint32_t flush_domains;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+	if (ret)
+		return ret;
+	flush_domains = i915_gem_dev_set_domain(obj->dev);
+
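+	/* If a GPU cache was flushed, emit a request so the flush is
+	 * tracked and the affected objects can be retired later.
+	 */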
+	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
+		(void) i915_add_request(dev, flush_domains);
+
+	return 0;
+}
+
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	while (!list_empty(head)) {
+		obj_priv = list_first_entry(head,
+					    struct drm_i915_gem_object,
+					    list);
+		obj = obj_priv->obj;
+
+		/* The caller holds struct_mutex and unlocks on error, so
+		 * dropping the lock here as well would double-unlock it.
+		 */
+		if (obj_priv->pin_count != 0) {
+			DRM_ERROR("Pinned object in unbind list\n");
+			return -EINVAL;
+		}
+
+		ret = i915_gem_object_unbind(obj);
+		if (ret != 0) {
+			DRM_ERROR("Error unbinding object: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t seqno, cur_seqno, last_seqno;
+	int stuck, ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 */
+	dev_priv->mm.suspended = 1;
+
+	/* Cancel the retire work handler and wait for it to finish if
+	 * it is currently running.
+	 */
+	mutex_unlock(&dev->struct_mutex);
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_kernel_lost_context(dev);
+
+	/* Flush the GPU along with all non-CPU write domains
+	 */
+	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
+					I915_GEM_DOMAIN_GTT));
+
+	if (seqno == 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	dev_priv->mm.waiting_gem_seqno = seqno;
+	last_seqno = 0;
+	stuck = 0;
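+	/* Poll for the final request to complete, declaring the chip
+	 * wedged if the reported seqno makes no progress for roughly a
+	 * second (100 polls at 10 ms).
+	 */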
+	for (;;) {
+		cur_seqno = i915_get_gem_seqno(dev);
+		if (i915_seqno_passed(cur_seqno, seqno))
+			break;
+		if (last_seqno == cur_seqno) {
+			if (stuck++ > 100) {
+				DRM_ERROR("hardware wedged\n");
+				dev_priv->mm.wedged = 1;
+				DRM_WAKEUP(&dev_priv->irq_queue);
+				break;
+			}
+		}
+		msleep(10);
+		last_seqno = cur_seqno;
+	}
+	dev_priv->mm.waiting_gem_seqno = 0;
+
+	i915_gem_retire_requests(dev);
+
+	/* Active and flushing should now be empty as we've
+	 * waited for a sequence higher than any pending execbuffer
+	 */
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+	/* Request should now be empty as we've also waited
+	 * for the last request in the list
+	 */
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+	/* Move all buffers out of the GTT. */
+	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int
+i915_gem_init_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	/* If we need a physical address for the status page, it's already
+	 * initialized at driver load time.
+	 */
+	if (!I915_NEED_GFX_HWS(dev))
+		return 0;
+
+	obj = drm_gem_object_alloc(dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate status page\n");
+		return -ENOMEM;
+	}
+	obj_priv = obj->driver_private;
+	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		return ret;
+	}
+
+	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+
+	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+	if (dev_priv->hw_status_page == NULL) {
+		DRM_ERROR("Failed to map status page.\n");
+		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+	dev_priv->hws_obj = obj;
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+	I915_READ(HWS_PGA); /* posting read */
+	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+	return 0;
+}
+
+static int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+	u32 head;
+
+	ret = i915_gem_init_hws(dev);
+	if (ret != 0)
+		return ret;
+
+	obj = drm_gem_object_alloc(dev, 128 * 1024);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate ringbuffer\n");
+		return -ENOMEM;
+	}
+	obj_priv = obj->driver_private;
+
+	ret = i915_gem_object_pin(obj, 4096);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		return ret;
+	}
+
+	/* Set up the kernel mapping for the ring. */
+	dev_priv->ring.Size = obj->size;
+	dev_priv->ring.tail_mask = obj->size - 1;
+
+	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+	dev_priv->ring.map.size = obj->size;
+	dev_priv->ring.map.type = 0;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+	if (dev_priv->ring.map.handle == NULL) {
+		DRM_ERROR("Failed to map ringbuffer.\n");
+		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+	dev_priv->ring.ring_obj = obj;
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	/* Stop the ring if it's running. */
+	I915_WRITE(PRB0_CTL, 0);
+	I915_WRITE(PRB0_TAIL, 0);
+	I915_WRITE(PRB0_HEAD, 0);
+
+	/* Initialize the ring. */
+	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_ERROR("Ring head not reset to zero "
+			  "ctl %08x head %08x tail %08x start %08x\n",
+			  I915_READ(PRB0_CTL),
+			  I915_READ(PRB0_HEAD),
+			  I915_READ(PRB0_TAIL),
+			  I915_READ(PRB0_START));
+		I915_WRITE(PRB0_HEAD, 0);
+
+		DRM_ERROR("Ring head forced to zero "
+			  "ctl %08x head %08x tail %08x start %08x\n",
+			  I915_READ(PRB0_CTL),
+			  I915_READ(PRB0_HEAD),
+			  I915_READ(PRB0_TAIL),
+			  I915_READ(PRB0_START));
+	}
+
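+	/* Enable the ring: the length field holds (number of pages - 1),
+	 * hence size - 4096 masked by RING_NR_PAGES; RING_NO_REPORT
+	 * leaves automatic head reporting off and RING_VALID turns the
+	 * ring on.
+	 */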
+	I915_WRITE(PRB0_CTL,
+		   ((obj->size - 4096) & RING_NR_PAGES) |
+		   RING_NO_REPORT |
+		   RING_VALID);
+
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+	/* If the head is still not zero, the ring is dead */
+	if (head != 0) {
+		DRM_ERROR("Ring initialization failed "
+			  "ctl %08x head %08x tail %08x start %08x\n",
+			  I915_READ(PRB0_CTL),
+			  I915_READ(PRB0_HEAD),
+			  I915_READ(PRB0_TAIL),
+			  I915_READ(PRB0_START));
+		return -EIO;
+	}
+
+	/* Update our cache of the ring state */
+	i915_kernel_lost_context(dev);
+
+	return 0;
+}
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->ring.ring_obj == NULL)
+		return;
+
+	drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+	i915_gem_object_unpin(dev_priv->ring.ring_obj);
+	drm_gem_object_unreference(dev_priv->ring.ring_obj);
+	dev_priv->ring.ring_obj = NULL;
+	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+
+	if (dev_priv->hws_obj != NULL) {
+		struct drm_gem_object *obj = dev_priv->hws_obj;
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		kunmap(obj_priv->page_list[0]);
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(obj);
+		dev_priv->hws_obj = NULL;
+		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		dev_priv->hw_status_page = NULL;
+
+		/* Write high address into HWS_PGA when disabling. */
+		I915_WRITE(HWS_PGA, 0x1ffff000);
+	}
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev_priv->mm.wedged) {
+		DRM_ERROR("Reenabling wedged hardware, good luck\n");
+		dev_priv->mm.wedged = 0;
+	}
+
+	ret = i915_gem_init_ringbuffer(dev);
+	if (ret != 0)
+		return ret;
+
+	mutex_lock(&dev->struct_mutex);
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	dev_priv->mm.suspended = 0;
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_irq_install(dev);
+
+	return 0;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	int ret;
+
+	ret = i915_gem_idle(dev);
+	drm_irq_uninstall(dev);
+
+	return ret;
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+	int ret;
+
+	ret = i915_gem_idle(dev);
+	if (ret)
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	INIT_LIST_HEAD(&dev_priv->mm.active_list);
+	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.request_list);
+	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+			  i915_gem_retire_work_handler);
+	INIT_WORK(&dev_priv->mm.vblank_work,
+		  i915_gem_vblank_work_handler);
+	dev_priv->mm.next_gem_seqno = 1;
+
+	i915_gem_detect_bit_6_swizzle(dev);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
new file mode 100644
index 000000000000..131c088f8c8a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#if WATCH_INACTIVE
+void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		obj = obj_priv->obj;
+		if (obj_priv->pin_count || obj_priv->active ||
+		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+					   I915_GEM_DOMAIN_GTT)))
+			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
+				  obj,
+				  obj_priv->pin_count, obj_priv->active,
+				  obj->write_domain, file, line);
+	}
+}
+#endif /* WATCH_INACTIVE */
+
+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+static void
+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+		   uint32_t bias, uint32_t mark)
+{
+	uint32_t *mem = kmap_atomic(page, KM_USER0);
+	int i;
+	for (i = start; i < end; i += 4)
+		DRM_INFO("%08x: %08x%s\n",
+			  (int) (bias + i), mem[i / 4],
+			  (bias + i == mark) ? " ********" : "");
+	kunmap_atomic(mem, KM_USER0);
+	/* give syslog time to catch up */
+	msleep(1);
+}
+
+void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+		     const char *where, uint32_t mark)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page;
+
+	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+		int page_len, chunk, chunk_len;
+
+		page_len = len - page * PAGE_SIZE;
+		if (page_len > PAGE_SIZE)
+			page_len = PAGE_SIZE;
+
+		for (chunk = 0; chunk < page_len; chunk += 128) {
+			chunk_len = page_len - chunk;
+			if (chunk_len > 128)
+				chunk_len = 128;
+			i915_gem_dump_page(obj_priv->page_list[page],
+					   chunk, chunk + chunk_len,
+					   obj_priv->gtt_offset +
+					   page * PAGE_SIZE,
+					   mark);
+		}
+	}
+}
+#endif
+
+#if WATCH_LRU
+void
+i915_dump_lru(struct drm_device *dev, const char *where)
+{
+	drm_i915_private_t		*dev_priv = dev->dev_private;
+	struct drm_i915_gem_object	*obj_priv;
+
+	DRM_INFO("active list %s {\n", where);
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+			    list)
+	{
+		DRM_INFO("    %p: %08x\n", obj_priv,
+			 obj_priv->last_rendering_seqno);
+	}
+	DRM_INFO("}\n");
+	DRM_INFO("flushing list %s {\n", where);
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+			    list)
+	{
+		DRM_INFO("    %p: %08x\n", obj_priv,
+			 obj_priv->last_rendering_seqno);
+	}
+	DRM_INFO("}\n");
+	DRM_INFO("inactive %s {\n", where);
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		DRM_INFO("    %p: %08x\n", obj_priv,
+			 obj_priv->last_rendering_seqno);
+	}
+	DRM_INFO("}\n");
+}
+#endif
+
+#if WATCH_COHERENCY
+void
+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page;
+	uint32_t *gtt_mapping;
+	uint32_t *backing_map = NULL;
+	int bad_count = 0;
+
+	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+		 __func__, obj, obj_priv->gtt_offset, handle,
+		 obj->size / 1024);
+
+	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+			      obj->size);
+	if (gtt_mapping == NULL) {
+		DRM_ERROR("failed to map GTT space\n");
+		return;
+	}
+
+	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+		int i;
+
+		backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+		if (backing_map == NULL) {
+			DRM_ERROR("failed to map backing page\n");
+			goto out;
+		}
+
+		for (i = 0; i < PAGE_SIZE / 4; i++) {
+			uint32_t cpuval = backing_map[i];
+			uint32_t gttval = readl(gtt_mapping +
+						page * 1024 + i);
+
+			if (cpuval != gttval) {
+				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+					 "0x%08x vs 0x%08x\n",
+					 (int)(obj_priv->gtt_offset +
+					       page * PAGE_SIZE + i * 4),
+					 cpuval, gttval);
+				if (bad_count++ >= 8) {
+					DRM_INFO("...\n");
+					goto out;
+				}
+			}
+		}
+		kunmap_atomic(backing_map, KM_USER0);
+		backing_map = NULL;
+	}
+
+ out:
+	if (backing_map != NULL)
+		kunmap_atomic(backing_map, KM_USER0);
+	iounmap(gtt_mapping);
+
+	/* give syslog time to catch up */
+	msleep(1);
+
+	/* Directly flush the object, since we just loaded values with the CPU
+	 * from the backing pages and we don't want to disturb the cache
+	 * management that we're trying to observe.
+	 */
+
+	i915_gem_clflush_object(obj);
+}
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
new file mode 100644
index 000000000000..15d4160415b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -0,0 +1,292 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+				int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Active:\n");
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+			    list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->name) {
+			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+				       obj, obj->name,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		} else {
+			DRM_PROC_PRINT("       %p: %08x %08x %d\n",
+				       obj,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		}
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+				  int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Flushing:\n");
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+			    list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->name) {
+			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+				       obj, obj->name,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		} else {
+			DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		}
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+				  int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Inactive:\n");
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+			    list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->name) {
+			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+				       obj, obj->name,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		} else {
+			DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		}
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+				 int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *gem_request;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Request:\n");
+	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+			    list)
+	{
+		DRM_PROC_PRINT("    %d @ %d %08x\n",
+			       gem_request->seqno,
+			       (int) (jiffies - gem_request->emitted_jiffies),
+			       gem_request->flush_domains);
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+	DRM_PROC_PRINT("Waiter sequence:  %d\n",
+		       dev_priv->mm.waiting_gem_seqno);
+	DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Interrupt enable:    %08x\n",
+		       I915_READ(IER));
+	DRM_PROC_PRINT("Interrupt identity:  %08x\n",
+		       I915_READ(IIR));
+	DRM_PROC_PRINT("Interrupt mask:      %08x\n",
+		       I915_READ(IMR));
+	DRM_PROC_PRINT("Pipe A stat:         %08x\n",
+		       I915_READ(PIPEASTAT));
+	DRM_PROC_PRINT("Pipe B stat:         %08x\n",
+		       I915_READ(PIPEBSTAT));
+	DRM_PROC_PRINT("Interrupts received: %d\n",
+		       atomic_read(&dev_priv->irq_received));
+	DRM_PROC_PRINT("Current sequence:    %d\n",
+		       i915_get_gem_seqno(dev));
+	DRM_PROC_PRINT("Waiter sequence:     %d\n",
+		       dev_priv->mm.waiting_gem_seqno);
+	DRM_PROC_PRINT("IRQ sequence:        %d\n",
+		       dev_priv->mm.irq_gem_seqno);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static struct drm_proc_list {
+	/** file name */
+	const char *name;
+	/** proc callback*/
+	int (*f) (char *, char **, off_t, int, int *, void *);
+} i915_gem_proc_list[] = {
+	{"i915_gem_active", i915_gem_active_info},
+	{"i915_gem_flushing", i915_gem_flushing_info},
+	{"i915_gem_inactive", i915_gem_inactive_info},
+	{"i915_gem_request", i915_gem_request_info},
+	{"i915_gem_seqno", i915_gem_seqno_info},
+	{"i915_gem_interrupt", i915_interrupt_info},
+};
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+	struct proc_dir_entry *ent;
+	int i, j;
+
+	for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+		ent = create_proc_entry(i915_gem_proc_list[i].name,
+					S_IFREG | S_IRUGO, minor->dev_root);
+		if (!ent) {
+			DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+				  i915_gem_proc_list[i].name);
+			for (j = 0; j < i; j++)
+				remove_proc_entry(i915_gem_proc_list[j].name,
+						  minor->dev_root);
+			return -1;
+		}
+		ent->read_proc = i915_gem_proc_list[i].f;
+		ent->data = minor;
+	}
+	return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+	int i;
+
+	if (!minor->dev_root)
+		return;
+
+	for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+		remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
new file mode 100644
index 000000000000..e8b85ac4ca04
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (!IS_I9XX(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
+		   IS_GM45(dev)) {
+		uint32_t dcc;
+
+		/* On 915-945 and GM965, channel interleave by the CPU is
+		 * determined by DCC.  The CPU will alternate based on bit 6
+		 * in interleaved mode, and the GPU will then also alternate
+		 * on bit 6, 9, and 10 for X, but the CPU may also optionally
+		 * alternate based on bit 17 (XOR not disabled and XOR
+		 * bit == 17).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (IS_I915G(dev) || IS_I915GM(dev) ||
+			    dcc & DCC_CHANNEL_XOR_DISABLE) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if (IS_I965GM(dev) || IS_GM45(dev)) {
+				/* GM965 only does bit 11-based channel
+				 * randomization
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 or perhaps other swizzling */
+				swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+				swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+			}
+			break;
+		}
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer, have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 */
+		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_set_tiling *args = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+	obj_priv = obj->driver_private;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (args->tiling_mode == I915_TILING_NONE) {
+		obj_priv->tiling_mode = I915_TILING_NONE;
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+	} else {
+		if (args->tiling_mode == I915_TILING_X)
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		else
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		/* If we can't handle the swizzling, make it untiled. */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+			args->tiling_mode = I915_TILING_NONE;
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		}
+	}
+	obj_priv->tiling_mode = args->tiling_mode;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_gem_object_unreference(obj);
+
+	return 0;
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_get_tiling *args = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+	obj_priv = obj->driver_private;
+
+	mutex_lock(&dev->struct_mutex);
+
+	args->tiling_mode = obj_priv->tiling_mode;
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_X:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		break;
+	case I915_TILING_Y:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		break;
+	case I915_TILING_NONE:
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		break;
+	default:
+		DRM_ERROR("unknown tiling mode\n");
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_gem_object_unreference(obj);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index df036118b8b1..baae511c785b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,12 +31,92 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#define USER_INT_FLAG (1<<1)
-#define VSYNC_PIPEB_FLAG (1<<5)
-#define VSYNC_PIPEA_FLAG (1<<7)
-
 #define MAX_NOPID ((u32)~0)
 
+/** These are the interrupts used by the driver */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
+				    I915_ASLE_INTERRUPT |		\
+				    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+				    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
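+/* IMR masks interrupts: a set bit disables the corresponding source,
+ * so enabling a source means clearing its bit.  The read back after
+ * each write posts the update before any subsequent MMIO.
+ */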
+void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != 0) {
+		dev_priv->irq_mask_reg &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != mask) {
+		dev_priv->irq_mask_reg |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+/**
+ * i915_get_pipe - return the pipe associated with a given plane
+ * @dev: DRM device
+ * @plane: plane to look for
+ *
+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+ * rather than a pipe number, since they may not always be equal.  This routine
+ * maps the given @plane back to a pipe number.
+ */
+static int
+i915_get_pipe(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 dspcntr;
+
+	dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
+
+	return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
+}
+
+/**
+ * i915_get_plane - return the plane associated with a given pipe
+ * @dev: DRM device
+ * @pipe: pipe to look for
+ *
+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+ * rather than a pipe number, since they may not always be equal.  This routine
+ * maps the given @pipe back to a plane number.
+ */
+static int
+i915_get_plane(struct drm_device *dev, int pipe)
+{
+	if (i915_get_pipe(dev, 0) == pipe)
+		return 0;
+	return 1;
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+
+	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+		return 1;
+
+	return 0;
+}
+
 /**
  * Emit blits for scheduled buffer swaps.
  *
@@ -48,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	unsigned long irqflags;
 	struct list_head *list, *tmp, hits, *hit;
 	int nhits, nrects, slice[2], upper[2], lower[2], i;
-	unsigned counter[2] = { atomic_read(&dev->vbl_received),
-				atomic_read(&dev->vbl_received2) };
+	unsigned counter[2];
 	struct drm_drawable_info *drw;
 	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	u32 cpp = dev_priv->cpp;
@@ -71,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 		src_pitch >>= 2;
 	}
 
+	counter[0] = drm_vblank_count(dev, 0);
+	counter[1] = drm_vblank_count(dev, 1);
+
 	DRM_DEBUG("\n");
 
 	INIT_LIST_HEAD(&hits);
@@ -83,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
 		drm_i915_vbl_swap_t *vbl_swap =
 			list_entry(list, drm_i915_vbl_swap_t, head);
+		int pipe = i915_get_pipe(dev, vbl_swap->plane);
 
-		if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
+		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
 			continue;
 
 		list_del(list);
 		dev_priv->swaps_pending--;
+		drm_vblank_put(dev, pipe);
 
 		spin_unlock(&dev_priv->swaps_lock);
 		spin_lock(&dev->drw_lock);
@@ -181,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 			drm_i915_vbl_swap_t *swap_hit =
 				list_entry(hit, drm_i915_vbl_swap_t, head);
 			struct drm_clip_rect *rect;
-			int num_rects, pipe;
+			int num_rects, plane;
 			unsigned short top, bottom;
 
 			drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -190,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 				continue;
 
 			rect = drw->rects;
-			pipe = swap_hit->pipe;
-			top = upper[pipe];
-			bottom = lower[pipe];
+			plane = swap_hit->plane;
+			top = upper[plane];
+			bottom = lower[plane];
 
 			for (num_rects = drw->num_rects; num_rects--; rect++) {
 				int y1 = max(rect->y1, top);
@@ -229,61 +313,139 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	}
 }
 
+u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long high_frame;
+	unsigned long low_frame;
+	u32 high1, high2, low, count;
+	int pipe;
+
+	pipe = i915_get_pipe(dev, plane);
+	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+		return 0;
+	}
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+			PIPE_FRAME_LOW_SHIFT);
+		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	count = (high1 << 8) | low;
+
+	return count;
+}
+
+void
+i915_gem_vblank_work_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.vblank_work);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_vblank_tasklet(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 temp;
+	u32 iir;
 	u32 pipea_stats, pipeb_stats;
+	int vblank = 0;
 
-	pipea_stats = I915_READ(I915REG_PIPEASTAT);
-	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+	atomic_inc(&dev_priv->irq_received);
 
-	temp = I915_READ16(I915REG_INT_IDENTITY_R);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, ~0);
+	iir = I915_READ(IIR);
 
-	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
+	if (iir == 0) {
+		if (dev->pdev->msi_enabled) {
+			I915_WRITE(IMR, dev_priv->irq_mask_reg);
+			(void) I915_READ(IMR);
+		}
+		return IRQ_NONE;
+	}
 
-	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+	/*
+	 * Clear the PIPE(A|B)STAT regs before the IIR; otherwise
+	 * we may get extra interrupts.
+	 */
+	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+		pipea_stats = I915_READ(PIPEASTAT);
+		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
+			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+					 PIPE_VBLANK_INTERRUPT_ENABLE);
+		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+					PIPE_VBLANK_INTERRUPT_STATUS)) {
+			vblank++;
+			drm_handle_vblank(dev, i915_get_plane(dev, 0));
+		}
 
-	if (temp == 0)
-		return IRQ_NONE;
+		I915_WRITE(PIPEASTAT, pipea_stats);
+	}
+	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+		pipeb_stats = I915_READ(PIPEBSTAT);
+		/* Ack the event */
+		I915_WRITE(PIPEBSTAT, pipeb_stats);
+
+		/* The vblank interrupt gets enabled even if we didn't ask for
+		   it, so make sure it's shut down again */
+		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
+			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+					 PIPE_VBLANK_INTERRUPT_ENABLE);
+		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+					PIPE_VBLANK_INTERRUPT_STATUS)) {
+			vblank++;
+			drm_handle_vblank(dev, i915_get_plane(dev, 1));
+		}
 
-	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
-	(void) I915_READ16(I915REG_INT_IDENTITY_R);
-	DRM_READMEMORYBARRIER();
+		if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
+			opregion_asle_intr(dev);
+		I915_WRITE(PIPEBSTAT, pipeb_stats);
+	}
+
+	I915_WRITE(IIR, iir);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	(void) I915_READ(IIR); /* Flush posted writes */
 
-	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_dispatch =
+			READ_BREADCRUMB(dev_priv);
 
-	if (temp & USER_INT_FLAG)
+	if (iir & I915_USER_INTERRUPT) {
+		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
 		DRM_WAKEUP(&dev_priv->irq_queue);
+	}
 
-	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
-		int vblank_pipe = dev_priv->vblank_pipe;
-
-		if ((vblank_pipe &
-		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
-		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-			if (temp & VSYNC_PIPEA_FLAG)
-				atomic_inc(&dev->vbl_received);
-			if (temp & VSYNC_PIPEB_FLAG)
-				atomic_inc(&dev->vbl_received2);
-		} else if (((temp & VSYNC_PIPEA_FLAG) &&
-			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-			   ((temp & VSYNC_PIPEB_FLAG) &&
-			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
-			atomic_inc(&dev->vbl_received);
-
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
-
-		if (dev_priv->swaps_pending > 0)
+	if (iir & I915_ASLE_INTERRUPT)
+		opregion_asle_intr(dev);
+
+	if (vblank && dev_priv->swaps_pending > 0) {
+		if (dev_priv->ring.ring_obj == NULL)
 			drm_locked_tasklet(dev, i915_vblank_tasklet);
-		I915_WRITE(I915REG_PIPEASTAT,
-			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
-			I915_VBLANK_CLEAR);
-		I915_WRITE(I915REG_PIPEBSTAT,
-			pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
-			I915_VBLANK_CLEAR);
+		else
+			schedule_work(&dev_priv->mm.vblank_work);
 	}
 
 	return IRQ_HANDLED;
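
Two idioms in the vblank code above are worth spelling out. The frame counter read by i915_get_vblank_counter is 24 bits split across two registers, hence the re-read of the high half until it is stable; and the tasklet's (counter - sequence) > (1<<23) test is the matching wrap-safe "has this sequence passed?" check, with 1<<23 being half the 24-bit range advertised via max_vblank_count. A minimal standalone sketch of the comparison, with illustrative names:

/* Wrap-safe sequence test: with unsigned arithmetic, (cur - target)
 * stays small once target has passed, even across a counter wrap;
 * a difference above half the counter range means target is still
 * in the future. */
static int seq_passed(unsigned int cur, unsigned int target)
{
	return (cur - target) <= (1u << 23);
}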
@@ -298,23 +460,45 @@ static int i915_emit_irq(struct drm_device * dev)
 
 	DRM_DEBUG("\n");
 
-	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
-
+	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+		dev_priv->counter = 1;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
 	BEGIN_LP_RING(6);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	OUT_RING(0);
-	OUT_RING(GFX_OP_USER_INTERRUPT);
+	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
 	return dev_priv->counter;
 }
 
+void i915_user_irq_get(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
+void i915_user_irq_put(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
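
The get/put pair is meant to bracket any sleep on irq_queue, so MI_USER_INTERRUPT stays unmasked exactly as long as someone is waiting on it; i915_wait_irq below is the in-file user, and the GEM request wait follows the same shape. As a minimal sketch, with done() standing in for the real completion test:

/* Illustrative pattern: hold a user-interrupt reference across the
 * wait; the refcount keeps the interrupt unmasked only while a
 * waiter is actually sleeping. */
i915_user_irq_get(dev);
DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, done());
i915_user_irq_put(dev);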
+
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -323,55 +507,34 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+		if (dev_priv->sarea_priv) {
+			dev_priv->sarea_priv->last_dispatch =
+				READ_BREADCRUMB(dev_priv);
+		}
 		return 0;
+	}
 
-	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
+	i915_user_irq_get(dev);
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+	i915_user_irq_put(dev);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
 			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
 	}
 
-	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-	return ret;
-}
-
-static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
-				      atomic_t *counter)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(counter))
-			- *sequence) <= (1<<23)));
-
-	*sequence = cur_vblank;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_dispatch =
+			READ_BREADCRUMB(dev_priv);
 
 	return ret;
 }
 
-
-int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
-	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
-}
-
-int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
-	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
-}
-
 /* Needs the lock as it touches the ring.
  */
 int i915_irq_emit(struct drm_device *dev, void *data,
@@ -381,14 +544,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
-
+	mutex_lock(&dev->struct_mutex);
 	result = i915_emit_irq(dev);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
@@ -414,18 +578,74 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 	return i915_wait_irq(dev, irqwait->irq_seq);
 }
 
-static void i915_enable_interrupt (struct drm_device *dev)
+int i915_enable_vblank(struct drm_device *dev, int plane)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 flag;
+	int pipe = i915_get_pipe(dev, plane);
+	u32	pipestat_reg = 0;
+	u32	pipestat;
+
+	switch (pipe) {
+	case 0:
+		pipestat_reg = PIPEASTAT;
+		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+		break;
+	case 1:
+		pipestat_reg = PIPEBSTAT;
+		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		break;
+	default:
+		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
+			  pipe);
+		break;
+	}
+
+	if (pipestat_reg) {
+		pipestat = I915_READ(pipestat_reg);
+		if (IS_I965G(dev))
+			pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
+		else
+			pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
+		/* Clear any stale interrupt status */
+		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+			     PIPE_VBLANK_INTERRUPT_STATUS);
+		I915_WRITE(pipestat_reg, pipestat);
+	}
+
+	return 0;
+}
 
-	flag = 0;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-		flag |= VSYNC_PIPEA_FLAG;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-		flag |= VSYNC_PIPEB_FLAG;
+void i915_disable_vblank(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe = i915_get_pipe(dev, plane);
+	u32	pipestat_reg = 0;
+	u32	pipestat;
+
+	switch (pipe) {
+	case 0:
+		pipestat_reg = PIPEASTAT;
+		i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+		break;
+	case 1:
+		pipestat_reg = PIPEBSTAT;
+		i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		break;
+	default:
+		DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
+			  pipe);
+		break;
+	}
 
-	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
+	if (pipestat_reg) {
+		pipestat = I915_READ(pipestat_reg);
+		pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+			      PIPE_VBLANK_INTERRUPT_ENABLE);
+		/* Clear any stale interrupt status */
+		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+			     PIPE_VBLANK_INTERRUPT_STATUS);
+		I915_WRITE(pipestat_reg, pipestat);
+	}
 }
 
 /* Set the vblank monitor pipe
@@ -434,22 +654,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_vblank_pipe_t *pipe = data;
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
-		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
-		return -EINVAL;
-	}
-
-	dev_priv->vblank_pipe = pipe->pipe;
-
-	i915_enable_interrupt (dev);
-
 	return 0;
 }
 
@@ -458,19 +668,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_pipe_t *pipe = data;
-	u16 flag;
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	flag = I915_READ(I915REG_INT_ENABLE_R);
-	pipe->pipe = 0;
-	if (flag & VSYNC_PIPEA_FLAG)
-		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
-	if (flag & VSYNC_PIPEB_FLAG)
-		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
 	return 0;
 }
@@ -484,11 +688,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_swap_t *swap = data;
 	drm_i915_vbl_swap_t *vbl_swap;
-	unsigned int pipe, seqtype, curseq;
+	unsigned int pipe, seqtype, curseq, plane;
 	unsigned long irqflags;
 	struct list_head *list;
+	int ret;
 
-	if (!dev_priv) {
+	if (!dev_priv || !dev_priv->sarea_priv) {
 		DRM_ERROR("%s called with no initialization\n", __func__);
 		return -EINVAL;
 	}
@@ -504,7 +709,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+	plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+	pipe = i915_get_pipe(dev, plane);
 
 	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
 
@@ -523,7 +729,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 
-	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+	/*
+	 * We take the ref here and put it when the swap actually completes
+	 * in the tasklet.
+	 */
+	ret = drm_vblank_get(dev, pipe);
+	if (ret)
+		return ret;
+	curseq = drm_vblank_count(dev, pipe);
 
 	if (seqtype == _DRM_VBLANK_RELATIVE)
 		swap->sequence += curseq;
@@ -533,6 +746,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 			swap->sequence = curseq + 1;
 		} else {
 			DRM_DEBUG("Missed target sequence\n");
+			drm_vblank_put(dev, pipe);
 			return -EINVAL;
 		}
 	}
@@ -543,7 +757,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
 
 		if (vbl_swap->drw_id == swap->drawable &&
-		    vbl_swap->pipe == pipe &&
+		    vbl_swap->plane == plane &&
 		    vbl_swap->sequence == swap->sequence) {
 			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
 			DRM_DEBUG("Already scheduled\n");
@@ -555,6 +769,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	if (dev_priv->swaps_pending >= 100) {
 		DRM_DEBUG("Too many swaps queued\n");
+		drm_vblank_put(dev, pipe);
 		return -EBUSY;
 	}
 
@@ -562,13 +777,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	if (!vbl_swap) {
 		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		drm_vblank_put(dev, pipe);
 		return -ENOMEM;
 	}
 
 	DRM_DEBUG("\n");
 
 	vbl_swap->drw_id = swap->drawable;
-	vbl_swap->pipe = pipe;
+	vbl_swap->plane = plane;
 	vbl_swap->sequence = swap->sequence;
 
 	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
@@ -587,37 +803,63 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
-	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
-	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+	I915_WRITE(HWSTAM, 0xeffe);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
 }
 
-void i915_driver_irq_postinstall(struct drm_device * dev)
+int i915_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int ret, num_pipes = 2;
 
 	spin_lock_init(&dev_priv->swaps_lock);
 	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
 	dev_priv->swaps_pending = 0;
 
-	if (!dev_priv->vblank_pipe)
-		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
-	i915_enable_interrupt(dev);
+	/* Set initial unmasked IRQs to just the selected vblank pipes. */
+	dev_priv->irq_mask_reg = ~0;
+
+	ret = drm_vblank_init(dev, num_pipes);
+	if (ret)
+		return ret;
+
+	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+	dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
+
+	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+	(void) I915_READ(IER);
+
+	opregion_enable_asle(dev);
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
+	return 0;
 }
 
 void i915_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 temp;
+	u32 temp;
 
 	if (!dev_priv)
 		return;
 
-	I915_WRITE16(I915REG_HWSTAM, 0xffff);
-	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
-	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+	dev_priv->vblank_pipe = 0;
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
 
-	temp = I915_READ16(I915REG_INT_IDENTITY_R);
-	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+	temp = I915_READ(PIPEASTAT);
+	I915_WRITE(PIPEASTAT, temp);
+	temp = I915_READ(PIPEBSTAT);
+	I915_WRITE(PIPEBSTAT, temp);
+	temp = I915_READ(IIR);
+	I915_WRITE(IIR, temp);
 }
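
One orientation note on the register scheme the rewritten interrupt code relies on throughout, summarized in comment form to match the usage above:

/*
 * IER - interrupt enable: which sources may raise interrupts at all.
 * IMR - interrupt mask:   a set bit hides that source from the CPU,
 *       so i915_enable_irq() clears bits here and i915_disable_irq()
 *       sets them.
 * IIR - interrupt identity: latched sources; acked by writing the set
 *       bits back, then read once to flush the posted write.
 *
 * The handler clears PIPEASTAT/PIPEBSTAT before acking IIR because the
 * pipe event bits re-latch while any pipe status bit is still set.
 */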
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
new file mode 100644
index 000000000000..1787a0c7e3ab
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_LBPC 0xf4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_SZ            (8*1024)
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x1000
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+
+struct opregion_header {
+       u8 signature[16];
+       u32 size;
+       u32 opregion_ver;
+       u8 bios_ver[32];
+       u8 vbios_ver[16];
+       u8 driver_ver[16];
+       u32 mboxes;
+       u8 reserved[164];
+} __attribute__((packed));
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+       u32 drdy;       /* driver readiness */
+       u32 csts;       /* notification status */
+       u32 cevt;       /* current event */
+       u8 rsvd1[20];
+       u32 didl[8];    /* supported display devices ID list */
+       u32 cpdl[8];    /* currently presented display list */
+       u32 cadl[8];    /* currently active display list */
+       u32 nadl[8];    /* next active devices list */
+       u32 aslp;       /* ASL sleep time-out */
+       u32 tidx;       /* toggle table index */
+       u32 chpd;       /* current hotplug enable indicator */
+       u32 clid;       /* current lid state*/
+       u32 cdck;       /* current docking state */
+       u32 sxsw;       /* Sx state resume */
+       u32 evts;       /* ASL supported events */
+       u32 cnot;       /* current OS notification */
+       u32 nrdy;       /* driver status */
+       u8 rsvd2[60];
+} __attribute__((packed));
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+       u32 scic;       /* SWSCI command|status|data */
+       u32 parm;       /* command parameters */
+       u32 dslp;       /* driver sleep time-out */
+       u8 rsvd[244];
+} __attribute__((packed));
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+       u32 ardy;       /* driver readiness */
+       u32 aslc;       /* ASLE interrupt command */
+       u32 tche;       /* technology enabled indicator */
+       u32 alsi;       /* current ALS illuminance reading */
+       u32 bclp;       /* backlight brightness to set */
+       u32 pfit;       /* panel fitting state */
+       u32 cblv;       /* current brightness level */
+       u16 bclm[20];   /* backlight level duty cycle mapping table */
+       u32 cpfm;       /* current panel fitting mode */
+       u32 epfm;       /* enabled panel fitting modes */
+       u8 plut[74];    /* panel LUT and identifier */
+       u32 pfmb;       /* PWM freq and min brightness */
+       u8 rsvd[102];
+} __attribute__((packed));
+
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM     (1 << 0)
+#define ASLE_SET_BACKLIGHT     (1 << 1)
+#define ASLE_SET_PFIT          (1 << 2)
+#define ASLE_SET_PWM_FREQ      (1 << 3)
+#define ASLE_REQ_MSK           0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAIL    (2<<10)
+#define ASLE_BACKLIGHT_FAIL    (2<<12)
+#define ASLE_PFIT_FAIL         (2<<14)
+#define ASLE_PWM_FREQ_FAIL     (2<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 blc_pwm_ctl, blc_pwm_ctl2;
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAIL;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)	/* bclp is u32; only the upper bound can trip */
+		return ASLE_BACKLIGHT_FAIL;
+
+	blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+	blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
+
+	if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
+		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
+	else
+		I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
+
+	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+	return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+	/* alsi is the current ALS reading in lux. 0 indicates below sensor
+	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+	return 0;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (pfmb & ASLE_PFMB_PWM_VALID) {
+		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+		pwm = pwm >> 9;
+		/* FIXME - what do we do with the PWM? */
+	}
+	return 0;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+	/* Panel fitting is currently controlled by the X code, so this is a
+	   noop until modesetting support works fully */
+	if (!(pfit & ASLE_PFIT_VALID))
+		return ASLE_PFIT_FAIL;
+	return 0;
+}
+
+void opregion_asle_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = asle->aslc & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM)
+		asle_stat |= asle_set_als_illum(dev, asle->alsi);
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+	if (asle_req & ASLE_SET_PFIT)
+		asle_stat |= asle_set_pfit(dev, asle->pfit);
+
+	if (asle_req & ASLE_SET_PWM_FREQ)
+		asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+
+	asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN    (1<<0)
+#define ASLE_BLC_EN    (1<<1)
+#define ASLE_PFIT_EN   (1<<2)
+#define ASLE_PFMB_EN   (1<<3)
+
+void opregion_enable_asle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+
+	if (asle) {
+		u32 pipeb_stats = I915_READ(PIPEBSTAT);
+		if (IS_MOBILE(dev)) {
+			/* Many devices trigger events with a write to the
+			   legacy backlight controller, so we need to ensure
+			   that it's able to generate interrupts */
+			I915_WRITE(PIPEBSTAT, pipeb_stats |=
+				   I915_LEGACY_BLC_EVENT_ENABLE);
+			i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
+					I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		} else
+			i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
+
+		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+			ASLE_PFMB_EN;
+		asle->ardy = 1;
+	}
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
+			       void *data)
+{
+	/* The only video events relevant to opregion are 0x80. These indicate
+	   either a docking event, lid switch or display switch request. In
+	   Linux, these are handled by the dock, button and video drivers.
+	   We might want to fix the video driver to be opregion-aware in the
+	   future, but right now we just indicate to the firmware that the
+	   request has been handled */
+
+	struct opregion_acpi *acpi;
+
+	if (!system_opregion)
+		return NOTIFY_DONE;
+
+	acpi = system_opregion->acpi;
+	acpi->csts = 0;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+	.notifier_call = intel_opregion_video_event,
+};
+
+int intel_opregion_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	void *base;
+	u32 asls, mboxes;
+	int err = 0;
+
+	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+	if (asls == 0) {
+		DRM_DEBUG("ACPI OpRegion not supported!\n");
+		return -ENOTSUPP;
+	}
+
+	base = ioremap(asls, OPREGION_SZ);
+	if (!base)
+		return -ENOMEM;
+
+	opregion->header = base;
+	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+		DRM_DEBUG("opregion signature mismatch\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	mboxes = opregion->header->mboxes;
+	if (mboxes & MBOX_ACPI) {
+		DRM_DEBUG("Public ACPI methods supported\n");
+		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+	} else {
+		DRM_DEBUG("Public ACPI methods not supported\n");
+		err = -ENOTSUPP;
+		goto err_out;
+	}
+	opregion->enabled = 1;
+
+	if (mboxes & MBOX_SWSCI) {
+		DRM_DEBUG("SWSCI supported\n");
+		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+	}
+	if (mboxes & MBOX_ASLE) {
+		DRM_DEBUG("ASLE supported\n");
+		opregion->asle = base + OPREGION_ASLE_OFFSET;
+	}
+
+	/* Notify BIOS we are ready to handle ACPI video extension notifications.
+	 * Right now, all the events are handled by the ACPI video module.
+	 * We don't actually need to do anything with them. */
+	opregion->acpi->csts = 0;
+	opregion->acpi->drdy = 1;
+
+	system_opregion = opregion;
+	register_acpi_notifier(&intel_opregion_notifier);
+
+	return 0;
+
+err_out:
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	return err;
+}
+
+void intel_opregion_free(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->enabled)
+		return;
+
+	opregion->acpi->drdy = 0;
+
+	system_opregion = NULL;
+	unregister_acpi_notifier(&intel_opregion_notifier);
+
+	/* just clear all opregion memory pointers now */
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+
+	opregion->enabled = 0;
+}
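
One arithmetic note on asle_set_backlight above, since the constants are easy to misread; a worked sketch with an illustrative request value:

/* bclp is a 0-255 brightness request.  Multiplying by 0x101 spreads
 * it across the 16-bit duty-cycle field (255 * 0x101 == 0xffff), and
 * cblv reports the same request rescaled to a 0-100 percentage. */
unsigned int bclp = 128;			/* example request */
unsigned int duty = (bclp * 0x101) - 1;		/* 0x8080 - 1 == 0x807f */
unsigned int percent = (bclp * 0x64) / 0xff;	/* 12800 / 255 == 50 */

(Note that a request of 0 would underflow the duty-cycle expression; nothing in the path above rejects it.)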
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
new file mode 100644
index 000000000000..5c2d9f206d05
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -0,0 +1,1417 @@
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ */
+#define INTEL_GMCH_CTRL		0x52
+#define INTEL_GMCH_ENABLED	0x4
+#define INTEL_GMCH_MEM_MASK	0x1
+#define INTEL_GMCH_MEM_64M	0x1
+#define INTEL_GMCH_MEM_128M	0
+
+#define INTEL_855_GMCH_GMS_MASK		(0x7 << 4)
+#define INTEL_855_GMCH_GMS_DISABLED	(0x0 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_1M	(0x1 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_4M	(0x2 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_8M	(0x3 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+
+#define INTEL_915G_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define INTEL_915G_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+
+/* PCI config space */
+
+#define HPLLCC	0xc0 /* 855 only */
+#define   GC_CLOCK_CONTROL_MASK		(3 << 0)
+#define   GC_CLOCK_133_200		(0 << 0)
+#define   GC_CLOCK_100_200		(1 << 0)
+#define   GC_CLOCK_100_133		(2 << 0)
+#define   GC_CLOCK_166_250		(3 << 0)
+#define GCFGC	0xf0 /* 915+ only */
+#define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
+#define   GC_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
+#define   GC_DISPLAY_CLOCK_333_MHZ	(4 << 4)
+#define   GC_DISPLAY_CLOCK_MASK		(7 << 4)
+#define LBB	0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+
+#define MI_NOOP			MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH		MI_INSTR(0x04, 0)
+#define   MI_READ_FLUSH		(1 << 0)
+#define   MI_EXE_FLUSH		(1 << 1)
+#define   MI_NO_WRITE_FLUSH	(1 << 2)
+#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
+#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
+#define   MI_STORE_DWORD_INDEX_SHIFT 2
+#define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
+#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
+#define   MI_BATCH_NON_SECURE	(1)
+#define   MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define   SC_UPDATE_SCISSOR       (0x1<<1)
+#define   SC_ENABLE_MASK          (0x1<<0)
+#define   SC_ENABLE               (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define   SCI_YMIN_MASK      (0xffff<<16)
+#define   SCI_XMIN_MASK      (0xffff<<0)
+#define   SCI_YMAX_MASK      (0xffff<<16)
+#define   SCI_XMAX_MASK      (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define   BLT_DEPTH_8			(0<<24)
+#define   BLT_DEPTH_16_565		(1<<24)
+#define   BLT_DEPTH_16_1555		(2<<24)
+#define   BLT_DEPTH_32			(3<<24)
+#define   BLT_ROP_GXCOPY		(0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define   ASYNC_FLIP                (1<<22)
+#define   DISPLAY_PLANE_A           (0<<20)
+#define   DISPLAY_PLANE_B           (1<<20)
+
+/*
+ * Instruction and interrupt control regs
+ */
+
+#define PRB0_TAIL	0x02030
+#define PRB0_HEAD	0x02034
+#define PRB0_START	0x02038
+#define PRB0_CTL	0x0203c
+#define   TAIL_ADDR		0x001FFFF8
+#define   HEAD_WRAP_COUNT	0xFFE00000
+#define   HEAD_WRAP_ONE		0x00200000
+#define   HEAD_ADDR		0x001FFFFC
+#define   RING_NR_PAGES		0x001FF000
+#define   RING_REPORT_MASK	0x00000006
+#define   RING_REPORT_64K	0x00000002
+#define   RING_REPORT_128K	0x00000004
+#define   RING_NO_REPORT	0x00000000
+#define   RING_VALID_MASK	0x00000001
+#define   RING_VALID		0x00000001
+#define   RING_INVALID		0x00000000
+#define PRB1_TAIL	0x02040 /* 915+ only */
+#define PRB1_HEAD	0x02044 /* 915+ only */
+#define PRB1_START	0x02048 /* 915+ only */
+#define PRB1_CTL	0x0204c /* 915+ only */
+#define ACTHD_I965	0x02074
+#define HWS_PGA		0x02080
+#define HWS_ADDRESS_MASK	0xfffff000
+#define HWS_START_ADDRESS_SHIFT	4
+#define IPEIR		0x02088
+#define NOPID		0x02094
+#define HWSTAM		0x02098
+#define SCPD0		0x0209c /* 915+ only */
+#define IER		0x020a0
+#define IIR		0x020a4
+#define IMR		0x020a8
+#define ISR		0x020ac
+#define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
+#define   I915_DISPLAY_PORT_INTERRUPT			(1<<17)
+#define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
+#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14)
+#define   I915_HWB_OOM_INTERRUPT			(1<<13)
+#define   I915_SYNC_STATUS_INTERRUPT			(1<<12)
+#define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
+#define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT	(1<<10)
+#define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT	(1<<9)
+#define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT	(1<<8)
+#define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT		(1<<7)
+#define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT		(1<<6)
+#define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT		(1<<5)
+#define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT		(1<<4)
+#define   I915_DEBUG_INTERRUPT				(1<<2)
+#define   I915_USER_INTERRUPT				(1<<1)
+#define   I915_ASLE_INTERRUPT				(1<<0)
+#define EIR		0x020b0
+#define EMR		0x020b4
+#define ESR		0x020b8
+#define INSTPM	        0x020c0
+#define ACTHD	        0x020c8
+#define FW_BLC		0x020d8
+#define FW_BLC_SELF	0x020e0 /* 915+ only */
+#define MI_ARB_STATE	0x020e4 /* 915+ only */
+#define CACHE_MODE_0	0x02120 /* 915+ only */
+#define   CM0_MASK_SHIFT          16
+#define   CM0_IZ_OPT_DISABLE      (1<<6)
+#define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define   CM0_COLOR_EVICT_DISABLE (1<<3)
+#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define GFX_FLSH_CNTL	0x02170 /* 915+ only */
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
+#define FBC_LL_BASE		0x03204 /* 4k page aligned */
+#define FBC_CONTROL		0x03208
+#define   FBC_CTL_EN		(1<<31)
+#define   FBC_CTL_PERIODIC	(1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_STRIDE_SHIFT	(5)
+#define   FBC_CTL_FENCENO	(1<<0)
+#define FBC_COMMAND		0x0320c
+#define   FBC_CMD_COMPRESS	(1<<0)
+#define FBC_STATUS		0x03210
+#define   FBC_STAT_COMPRESSING	(1<<31)
+#define   FBC_STAT_COMPRESSED	(1<<30)
+#define   FBC_STAT_MODIFIED	(1<<29)
+#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define FBC_CONTROL2		0x03214
+#define   FBC_CTL_FENCE_DBL	(0<<4)
+#define   FBC_CTL_IDLE_IMM	(0<<2)
+#define   FBC_CTL_IDLE_FULL	(1<<2)
+#define   FBC_CTL_IDLE_LINE	(2<<2)
+#define   FBC_CTL_IDLE_DEBUG	(3<<2)
+#define   FBC_CTL_CPU_FENCE	(1<<1)
+#define   FBC_CTL_PLANEA	(0<<0)
+#define   FBC_CTL_PLANEB	(1<<0)
+#define FBC_FENCE_OFF		0x0321b
+
+#define FBC_LL_SIZE		(1536)
+
+/*
+ * GPIO regs
+ */
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+# define GPIO_CLOCK_DIR_MASK		(1 << 0)
+# define GPIO_CLOCK_DIR_IN		(0 << 1)
+# define GPIO_CLOCK_DIR_OUT		(1 << 1)
+# define GPIO_CLOCK_VAL_MASK		(1 << 2)
+# define GPIO_CLOCK_VAL_OUT		(1 << 3)
+# define GPIO_CLOCK_VAL_IN		(1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+# define GPIO_DATA_DIR_MASK		(1 << 8)
+# define GPIO_DATA_DIR_IN		(0 << 9)
+# define GPIO_DATA_DIR_OUT		(1 << 9)
+# define GPIO_DATA_VAL_MASK		(1 << 10)
+# define GPIO_DATA_VAL_OUT		(1 << 11)
+# define GPIO_DATA_VAL_IN		(1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+
+/*
+ * Clock control & power management
+ */
+
+#define VGA0	0x6000
+#define VGA1	0x6004
+#define VGA_PD	0x6010
+#define   VGA0_PD_P2_DIV_4	(1 << 7)
+#define   VGA0_PD_P1_DIV_2	(1 << 5)
+#define   VGA0_PD_P1_SHIFT	0
+#define   VGA0_PD_P1_MASK	(0x1f << 0)
+#define   VGA1_PD_P2_DIV_4	(1 << 15)
+#define   VGA1_PD_P1_DIV_2	(1 << 13)
+#define   VGA1_PD_P1_SHIFT	8
+#define   VGA1_PD_P1_MASK	(0x1f << 8)
+#define DPLL_A	0x06014
+#define DPLL_B	0x06018
+#define   DPLL_VCO_ENABLE		(1 << 31)
+#define   DPLL_DVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
+#define   DPLL_VGA_MODE_DIS		(1 << 28)
+#define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
+#define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
+#define   DPLL_MODE_MASK		(3 << 26)
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
+#define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
+
+#define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define I915_CRC_ERROR_ENABLE			(1UL<<29)
+#define I915_CRC_DONE_ENABLE			(1UL<<28)
+#define I915_GMBUS_EVENT_ENABLE			(1UL<<27)
+#define I915_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
+#define I915_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
+#define I915_DPST_EVENT_ENABLE			(1UL<<23)
+#define I915_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
+#define I915_ODD_FIELD_INTERRUPT_ENABLE		(1UL<<21)
+#define I915_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
+#define I915_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18)	/* 965 or later */
+#define I915_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define I915_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define I915_CRC_ERROR_INTERRUPT_STATUS		(1UL<<13)
+#define I915_CRC_DONE_INTERRUPT_STATUS		(1UL<<12)
+#define I915_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define I915_VSYNC_INTERRUPT_STATUS		(1UL<<9)
+#define I915_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
+#define I915_DPST_EVENT_STATUS			(1UL<<7)
+#define I915_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
+#define I915_ODD_FIELD_INTERRUPT_STATUS		(1UL<<5)
+#define I915_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
+#define I915_START_VBLANK_INTERRUPT_STATUS	(1UL<<2)	/* 965 or later */
+#define I915_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+#define I915_OVERLAY_UPDATED_STATUS		(1UL<<0)
+
+#define SRX_INDEX		0x3c4
+#define SRX_DATA		0x3c5
+#define SR01			1
+#define SR01_SCREEN_OFF		(1<<5)
+
+#define PPCR			0x61204
+#define PPCR_ON			(1<<0)
+
+#define DVOB			0x61140
+#define DVOB_ON			(1<<31)
+#define DVOC			0x61160
+#define DVOC_ON			(1<<31)
+#define LVDS			0x61180
+#define LVDS_ON			(1<<31)
+
+#define ADPA			0x61100
+#define ADPA_DPMS_MASK		(~(3<<10))
+#define ADPA_DPMS_ON		(0<<10)
+#define ADPA_DPMS_SUSPEND	(1<<10)
+#define ADPA_DPMS_STANDBY	(2<<10)
+#define ADPA_DPMS_OFF		(3<<10)
+
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x001FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0xFFFFF000
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x001FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+/* Scratch pad debug 0 reg:
+ */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
+/* i830, required in DVO non-gang */
+#define   PLL_P2_DIVIDE_BY_4		(1 << 23)
+#define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
+#define   PLL_REF_INPUT_DREFCLK		(0 << 13)
+#define   PLL_REF_INPUT_TVCLKINA	(1 << 13) /* i830 */
+#define   PLL_REF_INPUT_TVCLKINBC	(2 << 13) /* SDVO TVCLKIN */
+#define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define   PLL_REF_INPUT_MASK		(3 << 13)
+#define   PLL_LOAD_PULSE_PHASE_SHIFT		9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay.  The default is 6.
+ */
+#define   PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define   DISPLAY_RATE_SELECT_FPA1		(1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define   SDVO_MULTIPLIER_MASK			0x000000ff
+#define   SDVO_MULTIPLIER_SHIFT_HIRES		4
+#define   SDVO_MULTIPLIER_SHIFT_VGA		0
+#define DPLL_A_MD 0x0601c /* 965+ only */
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define   DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+#define   DPLL_MD_UDI_DIVIDER_SHIFT		24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define   DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+#define   DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define   DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+#define   DPLL_MD_UDI_MULTIPLIER_SHIFT		8
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work.  No,
+ * I don't entirely understand what this does...
+ */
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+#define DPLL_B_MD 0x06020 /* 965+ only */
+#define FPA0	0x06040
+#define FPA1	0x06044
+#define FPB0	0x06048
+#define FPB1	0x0604c
+#define   FP_N_DIV_MASK		0x003f0000
+#define   FP_N_DIV_SHIFT		16
+#define   FP_M1_DIV_MASK	0x00003f00
+#define   FP_M1_DIV_SHIFT		 8
+#define   FP_M2_DIV_MASK	0x0000003f
+#define   FP_M2_DIV_SHIFT		 0
+#define DPLL_TEST	0x606c
+#define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+#define   DPLLB_TEST_SDVO_DIV_2		(1 << 22)
+#define   DPLLB_TEST_SDVO_DIV_4		(2 << 22)
+#define   DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
+#define   DPLLB_TEST_N_BYPASS		(1 << 19)
+#define   DPLLB_TEST_M_BYPASS		(1 << 18)
+#define   DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
+#define   DPLLA_TEST_N_BYPASS		(1 << 3)
+#define   DPLLA_TEST_M_BYPASS		(1 << 2)
+#define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+#define D_STATE		0x6104
+#define CG_2D_DIS	0x6200
+#define CG_3D_DIS	0x6204
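
Before the display engine registers, a hedged sketch of how the FP/DPLL fields above combine into a pixel clock. This mirrors the i9xx calculation used by mode-setting code of this era and is illustrative rather than anything this patch adds; the stored N/M1/M2 fields encode the divider value minus two:

/* Assumed i9xx DPLL arithmetic, for orientation only: m1/m2/n come
 * from the FPx0/FPx1 fields, p1/p2 from the DPLL post dividers. */
static unsigned int i9xx_dotclock_khz(unsigned int refclk_khz,
				      unsigned int n, unsigned int m1,
				      unsigned int m2, unsigned int p1,
				      unsigned int p2)
{
	unsigned int m = 5 * (m1 + 2) + (m2 + 2);
	unsigned int vco_khz = refclk_khz * m / (n + 2);

	return vco_khz / (p1 * p2);
}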
+
+/*
+ * Palette regs
+ */
+
+#define PALETTE_A		0x0a000
+#define PALETTE_B		0x0a800
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way.  It is not accessible from the CP register read instructions.
+ *
+ */
+#define MCHBAR_MIRROR_BASE	0x10000
+
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC			0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC	(1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
+#define DCC_ADDRESSING_MODE_MASK			(3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3			0x10206
+#define C1DRB3			0x10606
+
+/*
+ * Overlay regs
+ */
+
+#define OVADD			0x30000
+#define DOVSTA			0x30008
+#define OC_BUF			(0x3<<20)
+#define OGAMC5			0x30010
+#define OGAMC4			0x30014
+#define OGAMC3			0x30018
+#define OGAMC2			0x3001c
+#define OGAMC1			0x30020
+#define OGAMC0			0x30024
+
+/*
+ * Display engine regs
+ */
+
+/* Pipe A timing regs */
+#define HTOTAL_A	0x60000
+#define HBLANK_A	0x60004
+#define HSYNC_A		0x60008
+#define VTOTAL_A	0x6000c
+#define VBLANK_A	0x60010
+#define VSYNC_A		0x60014
+#define PIPEASRC	0x6001c
+#define BCLRPAT_A	0x60020
+
+/* Pipe B timing regs */
+#define HTOTAL_B	0x61000
+#define HBLANK_B	0x61004
+#define HSYNC_B		0x61008
+#define VTOTAL_B	0x6100c
+#define VBLANK_B	0x61010
+#define VSYNC_B		0x61014
+#define PIPEBSRC	0x6101c
+#define BCLRPAT_B	0x61020
+
+/* VGA port control */
+#define ADPA			0x61100
+#define   ADPA_DAC_ENABLE	(1<<31)
+#define   ADPA_DAC_DISABLE	0
+#define   ADPA_PIPE_SELECT_MASK	(1<<30)
+#define   ADPA_PIPE_A_SELECT	0
+#define   ADPA_PIPE_B_SELECT	(1<<30)
+#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define   ADPA_SETS_HVPOLARITY	0
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_VSYNC_CNTL_ENABLE 0
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_HSYNC_CNTL_ENABLE 0
+#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define   ADPA_VSYNC_ACTIVE_LOW	0
+#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define   ADPA_HSYNC_ACTIVE_LOW	0
+#define   ADPA_DPMS_MASK	(~(3<<10))
+#define   ADPA_DPMS_ON		(0<<10)
+#define   ADPA_DPMS_SUSPEND	(1<<10)
+#define   ADPA_DPMS_STANDBY	(2<<10)
+#define   ADPA_DPMS_OFF		(3<<10)
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN		0x61110
+#define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
+#define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
+#define   TV_HOTPLUG_INT_EN			(1 << 18)
+#define   CRT_HOTPLUG_INT_EN			(1 << 9)
+#define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+
+#define PORT_HOTPLUG_STAT	0x61114
+#define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
+#define   TV_HOTPLUG_INT_STATUS			(1 << 10)
+#define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+#define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+#define   SDVOC_HOTPLUG_INT_STATUS		(1 << 7)
+#define   SDVOB_HOTPLUG_INT_STATUS		(1 << 6)
+
+/* SDVO port control */
+#define SDVOB			0x61140
+#define SDVOC			0x61160
+#define   SDVO_ENABLE		(1 << 31)
+#define   SDVO_PIPE_B_SELECT	(1 << 30)
+#define   SDVO_STALL_SELECT	(1 << 29)
+#define   SDVO_INTERRUPT_ENABLE	(1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define   SDVO_PORT_MULTIPLY_MASK	(7 << 23)
+#define   SDVO_PORT_MULTIPLY_SHIFT		23
+#define   SDVO_PHASE_SELECT_MASK	(15 << 19)
+#define   SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
+#define   SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
+#define   SDVOC_GANG_MODE		(1 << 16)
+#define   SDVO_BORDER_ENABLE		(1 << 7)
+#define   SDVOB_PCIE_CONCURRENCY	(1 << 3)
+#define   SDVO_DETECTED			(1 << 2)
+/* Bits to be preserved when writing */
+#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
+#define   SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+
+/* DVO port control */
+#define DVOA			0x61120
+#define DVOB			0x61140
+#define DVOC			0x61160
+#define   DVO_ENABLE			(1 << 31)
+#define   DVO_PIPE_B_SELECT		(1 << 30)
+#define   DVO_PIPE_STALL_UNUSED		(0 << 28)
+#define   DVO_PIPE_STALL		(1 << 28)
+#define   DVO_PIPE_STALL_TV		(2 << 28)
+#define   DVO_PIPE_STALL_MASK		(3 << 28)
+#define   DVO_USE_VGA_SYNC		(1 << 15)
+#define   DVO_DATA_ORDER_I740		(0 << 14)
+#define   DVO_DATA_ORDER_FP		(1 << 14)
+#define   DVO_VSYNC_DISABLE		(1 << 11)
+#define   DVO_HSYNC_DISABLE		(1 << 10)
+#define   DVO_VSYNC_TRISTATE		(1 << 9)
+#define   DVO_HSYNC_TRISTATE		(1 << 8)
+#define   DVO_BORDER_ENABLE		(1 << 7)
+#define   DVO_DATA_ORDER_GBRG		(1 << 6)
+#define   DVO_DATA_ORDER_RGGB		(0 << 6)
+#define   DVO_DATA_ORDER_GBRG_ERRATA	(0 << 6)
+#define   DVO_DATA_ORDER_RGGB_ERRATA	(1 << 6)
+#define   DVO_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define   DVO_HSYNC_ACTIVE_HIGH		(1 << 3)
+#define   DVO_BLANK_ACTIVE_HIGH		(1 << 2)
+#define   DVO_OUTPUT_CSTATE_PIXELS	(1 << 1)	/* SDG only */
+#define   DVO_OUTPUT_SOURCE_SIZE_PIXELS	(1 << 0)	/* SDG only */
+#define   DVO_PRESERVE_MASK		(0x7<<24)
+#define DVOA_SRCDIM		0x61124
+#define DVOB_SRCDIM		0x61144
+#define DVOC_SRCDIM		0x61164
+#define   DVO_SRCDIM_HORIZONTAL_SHIFT	12
+#define   DVO_SRCDIM_VERTICAL_SHIFT	0
+
+/* LVDS port control */
+#define LVDS			0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define   LVDS_PORT_EN			(1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define   LVDS_PIPEB_SELECT		(1 << 30)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define   LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+#define   LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+#define   LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define   LVDS_A3_POWER_MASK		(3 << 6)
+#define   LVDS_A3_POWER_DOWN		(0 << 6)
+#define   LVDS_A3_POWER_UP		(3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define   LVDS_CLKB_POWER_MASK		(3 << 4)
+#define   LVDS_CLKB_POWER_DOWN		(0 << 4)
+#define   LVDS_CLKB_POWER_UP		(3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define   LVDS_B0B3_POWER_MASK		(3 << 2)
+#define   LVDS_B0B3_POWER_DOWN		(0 << 2)
+#define   LVDS_B0B3_POWER_UP		(3 << 2)
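+/*
+ * Illustrative combination (an assumption about typical usage, not from
+ * the docs): a single-channel 18bpp panel would program LVDS_PORT_EN |
+ * LVDS_A0A2_CLKA_POWER_UP, adding LVDS_A3_POWER_UP for 24bpp, and
+ * LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP for dual-channel modes.
+ */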
+
+/* Panel power sequencing */
+#define PP_STATUS	0x61200
+#define   PP_ON		(1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define   PP_READY		(1 << 30)
+#define   PP_SEQUENCE_NONE	(0 << 28)
+#define   PP_SEQUENCE_ON	(1 << 28)
+#define   PP_SEQUENCE_OFF	(2 << 28)
+#define   PP_SEQUENCE_MASK	0x30000000
+#define PP_CONTROL	0x61204
+#define   POWER_TARGET_ON	(1 << 0)
+#define PP_ON_DELAYS	0x61208
+#define PP_OFF_DELAYS	0x6120c
+#define PP_DIVISOR	0x61210
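+/*
+ * Illustrative poll (assumed usage, not from the docs): after setting
+ * POWER_TARGET_ON in PP_CONTROL, software would wait until PP_STATUS
+ * reports PP_ON and the PP_SEQUENCE_MASK field returns to
+ * PP_SEQUENCE_NONE, indicating the power sequencer has finished.
+ */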
+
+/* Panel fitting */
+#define PFIT_CONTROL	0x61230
+#define   PFIT_ENABLE		(1 << 31)
+#define   PFIT_PIPE_MASK	(3 << 29)
+#define   PFIT_PIPE_SHIFT	29
+#define   VERT_INTERP_DISABLE	(0 << 10)
+#define   VERT_INTERP_BILINEAR	(1 << 10)
+#define   VERT_INTERP_MASK	(3 << 10)
+#define   VERT_AUTO_SCALE	(1 << 9)
+#define   HORIZ_INTERP_DISABLE	(0 << 6)
+#define   HORIZ_INTERP_BILINEAR	(1 << 6)
+#define   HORIZ_INTERP_MASK	(3 << 6)
+#define   HORIZ_AUTO_SCALE	(1 << 5)
+#define   PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_PGM_RATIOS	0x61234
+#define   PFIT_VERT_SCALE_MASK			0xfff00000
+#define   PFIT_HORIZ_SCALE_MASK			0x0000fff0
+#define PFIT_AUTO_RATIOS 0x61238
+
+/* Backlight control */
+#define BLC_PWM_CTL		0x61254
+#define   BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+#define BLC_PWM_CTL2		0x61250 /* 965+ only */
+#define   BLM_COMBINATION_MODE (1 << 30)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define   BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
+#define   BLM_LEGACY_MODE				(1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define   BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define   BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
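+/*
+ * Worked example (illustrative numbers): if the modulation frequency
+ * field reads back as 0x1000, the complete PWM cycle is
+ * 2 * 0x1000 = 0x2000 increments, so a 50% duty cycle corresponds to
+ * 0x1000 in the duty cycle field.
+ */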
+
+/* TV port control */
+#define TV_CTL			0x68000
+/** Enables the TV encoder */
+# define TV_ENC_ENABLE			(1 << 31)
+/** Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPEB_SELECT		(1 << 30)
+/** Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE	(0 << 28)
+/** Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO		(1 << 28)
+/** Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT	(2 << 28)
+/** Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE	(3 << 28)
+# define TV_TRILEVEL_SYNC		(1 << 21)
+/** Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC			(1 << 20)
+/** Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X		(0 << 18)
+/** Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X		(1 << 18)
+/** Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE		(2 << 18)
+/** Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X		(3 << 18)
+/** Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE			(1 << 17)
+/** Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
+# define TV_PAL_BURST			(1 << 16)
+/** Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK		(7 << 12)
+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX			(1 << 11)
+/**
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX			(1 << 10)
+/** Bits that must be preserved by software */
+# define TV_CTL_SAVE			((3 << 8) | (3 << 6))
+# define TV_FUSE_STATE_MASK		(3 << 4)
+/** Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED		(0 << 4)
+/** Read-only state that reports that Macrovision is disabled in hardware */
+# define TV_FUSE_STATE_NO_MACROVISION	(1 << 4)
+/** Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED		(2 << 4)
+/** Normal operation */
+# define TV_TEST_MODE_NORMAL		(0 << 0)
+/** Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1		(1 << 0)
+/** Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2		(2 << 0)
+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3		(3 << 0)
+/** Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4		(4 << 0)
+/** Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5		(5 << 0)
+/**
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT	(7 << 0)
+# define TV_TEST_MODE_MASK		(7 << 0)
+
+#define TV_DAC			0x68004
+/**
+ * Reports that DAC state change logic has reported change (RO).
+ *
+ * This gets cleared when TVDAC_STATE_CHG_EN is cleared.
+ */
+# define TVDAC_STATE_CHG		(1 << 31)
+# define TVDAC_SENSE_MASK		(7 << 28)
+/** Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE			(1 << 30)
+/** Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE			(1 << 29)
+/** Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE			(1 << 28)
+/**
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN		(1 << 27)
+/** Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL		(1 << 26)
+/** Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL		(1 << 25)
+/** Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL		(1 << 24)
+/** Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE		(1 << 7)
+/** Sets the slew rate.  Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST		(1 << 6)
+# define DAC_A_1_3_V			(0 << 4)
+# define DAC_A_1_1_V			(1 << 4)
+# define DAC_A_0_7_V			(2 << 4)
+# define DAC_A_OFF			(3 << 4)
+# define DAC_B_1_3_V			(0 << 2)
+# define DAC_B_1_1_V			(1 << 2)
+# define DAC_B_0_7_V			(2 << 2)
+# define DAC_B_OFF			(3 << 2)
+# define DAC_C_1_3_V			(0 << 0)
+# define DAC_C_1_1_V			(1 << 0)
+# define DAC_C_0_7_V			(2 << 0)
+# define DAC_C_OFF			(3 << 0)
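+/*
+ * Illustrative load-detection sequence, assembled from the comments above
+ * (the exact ordering is an assumption): with the chosen pipe's PLL
+ * running and TV_ENC_ENABLE clear, set TVDAC_STATE_CHG_EN, select
+ * TV_TEST_MODE_MONITOR_DETECT in TV_CTL, then read TVDAC_SENSE_MASK;
+ * which of the TVDAC_A/B/C_SENSE bits are set indicates the attached
+ * load.
+ */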
+
+/**
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent.  The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
+#define TV_CSC_Y		0x68010
+# define TV_RY_MASK			0x07ff0000
+# define TV_RY_SHIFT			16
+# define TV_GY_MASK			0x00000fff
+# define TV_GY_SHIFT			0
+
+#define TV_CSC_Y2		0x68014
+# define TV_BY_MASK			0x07ff0000
+# define TV_BY_SHIFT			16
+/**
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK			0x000003ff
+# define TV_AY_SHIFT			0
+
+#define TV_CSC_U		0x68018
+# define TV_RU_MASK			0x07ff0000
+# define TV_RU_SHIFT			16
+# define TV_GU_MASK			0x000007ff
+# define TV_GU_SHIFT			0
+
+#define TV_CSC_U2		0x6801c
+# define TV_BU_MASK			0x07ff0000
+# define TV_BU_SHIFT			16
+/**
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK			0x000003ff
+# define TV_AU_SHIFT			0
+
+#define TV_CSC_V		0x68020
+# define TV_RV_MASK			0x0fff0000
+# define TV_RV_SHIFT			16
+# define TV_GV_MASK			0x000007ff
+# define TV_GV_SHIFT			0
+
+#define TV_CSC_V2		0x68024
+# define TV_BV_MASK			0x07ff0000
+# define TV_BV_SHIFT			16
+/**
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK			0x000007ff
+# define TV_AV_SHIFT			0
+
+#define TV_CLR_KNOBS		0x68028
+/** 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK		0xff000000
+# define TV_BRIGHTNESS_SHIFT		24
+/** Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK		0x00ff0000
+# define TV_CONTRAST_SHIFT		16
+/** Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK		0x0000ff00
+# define TV_SATURATION_SHIFT		8
+/** Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK			0x000000ff
+# define TV_HUE_SHIFT			0
+
+#define TV_CLR_LEVEL		0x6802c
+/** Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK		0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT		16
+/** Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK		0x000001ff
+# define TV_BLANK_LEVEL_SHIFT		0
+
+#define TV_H_CTL_1		0x68030
+/** Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK		0x1fff0000
+# define TV_HSYNC_END_SHIFT		16
+/** Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK			0x00001fff
+# define TV_HTOTAL_SHIFT		0
+
+#define TV_H_CTL_2		0x68034
+/** Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA			(1 << 31)
+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT		16
+# define TV_HBURST_START_MASK		0x1fff0000
+/** Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT		0
+# define TV_HBURST_LEN_MASK		0x0001fff
+
+#define TV_H_CTL_3		0x68038
+/** End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT		16
+# define TV_HBLANK_END_MASK		0x1fff0000
+/** Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT		0
+# define TV_HBLANK_START_MASK		0x0001fff
+
+#define TV_V_CTL_1		0x6803c
+/** XXX */
+# define TV_NBR_END_SHIFT		16
+# define TV_NBR_END_MASK		0x07ff0000
+/** XXX */
+# define TV_VI_END_F1_SHIFT		8
+# define TV_VI_END_F1_MASK		0x00003f00
+/** XXX */
+# define TV_VI_END_F2_SHIFT		0
+# define TV_VI_END_F2_MASK		0x0000003f
+
+#define TV_V_CTL_2		0x68040
+/** Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK		0x07ff0000
+# define TV_VSYNC_LEN_SHIFT		16
+/** Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK		0x00007f00
+# define TV_VSYNC_START_F1_SHIFT	8
+/**
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK		0x0000007f
+# define TV_VSYNC_START_F2_SHIFT	0
+
+#define TV_V_CTL_3		0x68044
+/** Enables generation of the equalization signal */
+# define TV_EQUAL_ENA			(1 << 31)
+/** Length of the equalization pulse, in half lines */
+# define TV_VEQ_LEN_MASK		0x007f0000
+# define TV_VEQ_LEN_SHIFT		16
+/** Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK		0x0007f00
+# define TV_VEQ_START_F1_SHIFT		8
+/**
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK		0x000007f
+# define TV_VEQ_START_F2_SHIFT		0
+
+#define TV_V_CTL_4		0x68048
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK	0x003f0000
+# define TV_VBURST_START_F1_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK		0x000000ff
+# define TV_VBURST_END_F1_SHIFT		0
+
+#define TV_V_CTL_5		0x6804c
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK	0x003f0000
+# define TV_VBURST_START_F2_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK		0x000000ff
+# define TV_VBURST_END_F2_SHIFT		0
+
+#define TV_V_CTL_6		0x68050
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK	0x003f0000
+# define TV_VBURST_START_F3_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK		0x000000ff
+# define TV_VBURST_END_F3_SHIFT		0
+
+#define TV_V_CTL_7		0x68054
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK	0x003f0000
+# define TV_VBURST_START_F4_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK		0x000000ff
+# define TV_VBURST_END_F4_SHIFT		0
+
+#define TV_SC_CTL_1		0x68060
+/** Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN			(1 << 31)
+/** Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN			(1 << 30)
+/** Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN			(1 << 29)
+/** Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2		(0 << 24)
+/** Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4		(1 << 24)
+/** Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8		(2 << 24)
+/** Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER		(3 << 24)
+/** Sets the peak amplitude of the colorburst.*/
+# define TV_BURST_LEVEL_MASK		0x00ff0000
+# define TV_BURST_LEVEL_SHIFT		16
+/** Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK		0x00000fff
+# define TV_SCDDA1_INC_SHIFT		0
+
+#define TV_SC_CTL_2		0x68064
+/** Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK		0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT		16
+/** Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK		0x00007fff
+# define TV_SCDDA2_INC_SHIFT		0
+
+#define TV_SC_CTL_3		0x68068
+/** Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK		0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT		16
+/** Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK		0x00007fff
+# define TV_SCDDA3_INC_SHIFT		0
+
+#define TV_WIN_POS		0x68070
+/** X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK			0x1fff0000
+# define TV_XPOS_SHIFT			16
+/** Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK			0x00000fff
+# define TV_YPOS_SHIFT			0
+
+#define TV_WIN_SIZE		0x68074
+/** Horizontal size of the display window, measured in pixels */
+# define TV_XSIZE_MASK			0x1fff0000
+# define TV_XSIZE_SHIFT			16
+/**
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK			0x00000fff
+# define TV_YSIZE_SHIFT			0
+
+#define TV_FILTER_CTL_1		0x68080
+/**
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE			(1 << 31)
+/**
+ * Disables the vertical filter.
+ *
+ * This is required on modes more than 1024 pixels wide.
+ */
+# define TV_V_FILTER_BYPASS		(1 << 29)
+/** Enables adaptive vertical filtering */
+# define TV_VADAPT			(1 << 28)
+# define TV_VADAPT_MODE_MASK		(3 << 26)
+/** Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST		(0 << 26)
+/** Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE	(1 << 26)
+/** Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST		(3 << 26)
+/**
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate.  TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK		0x00003fff
+# define TV_HSCALE_FRAC_SHIFT		0
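+/*
+ * Worked example (illustrative numbers): a 720-pixel source scaled onto
+ * a 640-pixel display with 4x oversampling gives
+ * (720 - 1) / ((4 * 640) - 1) = 719 / 2559, roughly 0.281, so the
+ * 14-bit fraction field would hold about 0.281 * 2^14 = 4604.
+ */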
+
+#define TV_FILTER_CTL_2		0x68084
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK		0x00038000
+# define TV_VSCALE_INT_SHIFT		15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK		0x00007fff
+# define TV_VSCALE_FRAC_SHIFT		0
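+/*
+ * Worked example (illustrative numbers): a 480-line source on a 480-line
+ * interlaced mode (interlace = 2) gives
+ * (480 - 1) / ((2 * 480) - 1) = 479 / 959, roughly 0.4995, i.e. an
+ * integer part of 0 and a fraction field of about 0.4995 * 2^15 = 16367.
+ */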
+
+#define TV_FILTER_CTL_3		0x68088
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK		0x00038000
+# define TV_VSCALE_IP_INT_SHIFT		15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK		0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT		0
+
+#define TV_CC_CONTROL		0x68090
+# define TV_CC_ENABLE			(1 << 31)
+/**
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK			(1 << 27)
+# define TV_CC_FID_SHIFT		27
+/** Sets the horizontal position of the CC data.  Usually 135. */
+# define TV_CC_HOFF_MASK		0x03ff0000
+# define TV_CC_HOFF_SHIFT		16
+/** Sets the vertical position of the CC data.  Usually 21. */
+# define TV_CC_LINE_MASK		0x0000003f
+# define TV_CC_LINE_SHIFT		0
+
+#define TV_CC_DATA		0x68094
+# define TV_CC_RDY			(1 << 31)
+/** Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK		0x007f0000
+# define TV_CC_DATA_2_SHIFT		16
+/** First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK		0x0000007f
+# define TV_CC_DATA_1_SHIFT		0
+
+#define TV_H_LUMA_0		0x68100
+#define TV_H_LUMA_59		0x681ec
+#define TV_H_CHROMA_0		0x68200
+#define TV_H_CHROMA_59		0x682ec
+#define TV_V_LUMA_0		0x68300
+#define TV_V_LUMA_42		0x683a8
+#define TV_V_CHROMA_0		0x68400
+#define TV_V_CHROMA_42		0x684a8
+
+/* Display & cursor control */
+
+/* Pipe A */
+#define PIPEADSL		0x70000
+#define PIPEACONF		0x70008
+#define   PIPEACONF_ENABLE	(1<<31)
+#define   PIPEACONF_DISABLE	0
+#define   PIPEACONF_DOUBLE_WIDE	(1<<30)
+#define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPEACONF_SINGLE_WIDE	0
+#define   PIPEACONF_PIPE_UNLOCKED 0
+#define   PIPEACONF_PIPE_LOCKED	(1<<25)
+#define   PIPEACONF_PALETTE	0
+#define   PIPEACONF_GAMMA		(1<<24)
+#define   PIPECONF_FORCE_BORDER	(1<<25)
+#define   PIPECONF_PROGRESSIVE	(0 << 21)
+#define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define   PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+#define PIPEASTAT		0x70024
+#define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
+#define   PIPE_CRC_DONE_ENABLE			(1UL<<28)
+#define   PIPE_GMBUS_EVENT_ENABLE		(1UL<<27)
+#define   PIPE_HOTPLUG_INTERRUPT_ENABLE		(1UL<<26)
+#define   PIPE_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
+#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
+#define   PIPE_DPST_EVENT_ENABLE		(1UL<<23)
+#define   PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
+#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE	(1UL<<21)
+#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE	(1UL<<18) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18) /* 965 or later */
+#define   PIPE_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define   PIPE_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define   PIPE_CRC_ERROR_INTERRUPT_STATUS	(1UL<<13)
+#define   PIPE_CRC_DONE_INTERRUPT_STATUS	(1UL<<12)
+#define   PIPE_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define   PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL<<10)
+#define   PIPE_VSYNC_INTERRUPT_STATUS		(1UL<<9)
+#define   PIPE_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
+#define   PIPE_DPST_EVENT_STATUS		(1UL<<7)
+#define   PIPE_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
+#define   PIPE_ODD_FIELD_INTERRUPT_STATUS	(1UL<<5)
+#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS	(1UL<<2) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
+#define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+#define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
+
+#define DSPARB			0x70030
+#define   DSPARB_CSTART_MASK	(0x7f << 7)
+#define   DSPARB_CSTART_SHIFT	7
+#define   DSPARB_BSTART_MASK	(0x7f)
+#define   DSPARB_BSTART_SHIFT	0
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ *  do {
+ *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *             PIPE_FRAME_LOW_SHIFT);
+ *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *  } while (high1 != high2);
+ *  frame = (high1 << 8) | low1;
+ */
+#define PIPEAFRAMEHIGH          0x70040
+#define   PIPE_FRAME_HIGH_MASK    0x0000ffff
+#define   PIPE_FRAME_HIGH_SHIFT   0
+#define PIPEAFRAMEPIXEL         0x70044
+#define   PIPE_FRAME_LOW_MASK     0xff000000
+#define   PIPE_FRAME_LOW_SHIFT    24
+#define   PIPE_PIXEL_MASK         0x00ffffff
+#define   PIPE_PIXEL_SHIFT        0
+
+/* Cursor A & B regs */
+#define CURACNTR		0x70080
+#define   CURSOR_MODE_DISABLE   0x00
+#define   CURSOR_MODE_64_32B_AX 0x07
+#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define CURABASE		0x70084
+#define CURAPOS			0x70088
+#define   CURSOR_POS_MASK       0x007FF
+#define   CURSOR_POS_SIGN       0x8000
+#define   CURSOR_X_SHIFT        0
+#define   CURSOR_Y_SHIFT        16
+#define CURBCNTR		0x700c0
+#define CURBBASE		0x700c4
+#define CURBPOS			0x700c8
+
+/* Display A control */
+#define DSPACNTR                0x70180
+#define   DISPLAY_PLANE_ENABLE			(1<<31)
+#define   DISPLAY_PLANE_DISABLE			0
+#define   DISPPLANE_GAMMA_ENABLE		(1<<30)
+#define   DISPPLANE_GAMMA_DISABLE		0
+#define   DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define   DISPPLANE_8BPP			(0x2<<26)
+#define   DISPPLANE_15_16BPP			(0x4<<26)
+#define   DISPPLANE_16BPP			(0x5<<26)
+#define   DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
+#define   DISPPLANE_32BPP			(0x7<<26)
+#define   DISPPLANE_STEREO_ENABLE		(1<<25)
+#define   DISPPLANE_STEREO_DISABLE		0
+#define   DISPPLANE_SEL_PIPE_MASK		(1<<24)
+#define   DISPPLANE_SEL_PIPE_A			0
+#define   DISPPLANE_SEL_PIPE_B			(1<<24)
+#define   DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define   DISPPLANE_SRC_KEY_DISABLE		0
+#define   DISPPLANE_LINE_DOUBLE			(1<<20)
+#define   DISPPLANE_NO_LINE_DOUBLE		0
+#define   DISPPLANE_STEREO_POLARITY_FIRST	0
+#define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+#define DSPAADDR		0x70184
+#define DSPASTRIDE		0x70188
+#define DSPAPOS			0x7018C /* reserved */
+#define DSPASIZE		0x70190
+#define DSPASURF		0x7019C /* 965+ only */
+#define DSPATILEOFF		0x701A4 /* 965+ only */
+
+/* VBIOS flags */
+#define SWF00			0x71410
+#define SWF01			0x71414
+#define SWF02			0x71418
+#define SWF03			0x7141c
+#define SWF04			0x71420
+#define SWF05			0x71424
+#define SWF06			0x71428
+#define SWF10			0x70410
+#define SWF11			0x70414
+#define SWF14			0x71420
+#define SWF30			0x72414
+#define SWF31			0x72418
+#define SWF32			0x7241c
+
+/* Pipe B */
+#define PIPEBDSL		0x71000
+#define PIPEBCONF		0x71008
+#define PIPEBSTAT		0x71024
+#define PIPEBFRAMEHIGH		0x71040
+#define PIPEBFRAMEPIXEL		0x71044
+
+/* Display B control */
+#define DSPBCNTR		0x71180
+#define   DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define   DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
+#define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
+#define DSPBADDR		0x71184
+#define DSPBSTRIDE		0x71188
+#define DSPBPOS			0x7118C
+#define DSPBSIZE		0x71190
+#define DSPBSURF		0x7119C
+#define DSPBTILEOFF		0x711A4
+
+/* VBIOS regs */
+#define VGACNTRL		0x71400
+# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_2X_MODE				(1 << 30)
+# define VGA_PIPE_B_SELECT			(1 << 29)
+
+#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
new file mode 100644
index 000000000000..603fe742ccd4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -0,0 +1,509 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (pipe == PIPE_A)
+		return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+	else
+		return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for (i = 0; i < 256; i++)
+		array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for (i = 0; i < 256; i++)
+		I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE8(index_port, reg);
+	return I915_READ8(data_port);
+}
+
+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+	return I915_READ8(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+	I915_WRITE8(VGA_AR_DATA_WRITE, val);
+}
+
+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE8(index_port, reg);
+	I915_WRITE8(data_port, val);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* VGA color palette registers */
+	dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+	/* DACRX automatically increments during read */
+	I915_WRITE8(VGA_DACRX, 0);
+	/* Read 3 bytes of color data from each index */
+	for (i = 0; i < 256 * 3; i++)
+		dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
+
+	/* MSR bits */
+	dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* CRT controller regs */
+	i915_write_indexed(dev, cr_index, cr_data, 0x11,
+			   i915_read_indexed(dev, cr_index, cr_data, 0x11) &
+			   (~0x80));
+	for (i = 0; i <= 0x24; i++)
+		dev_priv->saveCR[i] =
+			i915_read_indexed(dev, cr_index, cr_data, i);
+	/* Make sure we don't turn off CR group 0 writes */
+	dev_priv->saveCR[0x11] &= ~0x80;
+
+	/* Attribute controller registers */
+	I915_READ8(st01);
+	dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+	for (i = 0; i <= 0x14; i++)
+		dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+	I915_READ8(st01);
+
+	/* Graphics controller registers */
+	for (i = 0; i < 9; i++)
+		dev_priv->saveGR[i] =
+			i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+	dev_priv->saveGR[0x10] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+	dev_priv->saveGR[0x11] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+	dev_priv->saveGR[0x18] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+	/* Sequencer registers */
+	for (i = 0; i < 8; i++)
+		dev_priv->saveSR[i] =
+			i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* MSR bits */
+	I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* Sequencer registers, don't write SR07 */
+	for (i = 0; i < 7; i++)
+		i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
+				   dev_priv->saveSR[i]);
+
+	/* CRT controller regs */
+	/* Enable CR group 0 writes */
+	i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+	for (i = 0; i <= 0x24; i++)
+		i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+	/* Graphics controller regs */
+	for (i = 0; i < 9; i++)
+		i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
+				   dev_priv->saveGR[i]);
+
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+			   dev_priv->saveGR[0x10]);
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+			   dev_priv->saveGR[0x11]);
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+			   dev_priv->saveGR[0x18]);
+
+	/* Attribute controller registers */
+	I915_READ8(st01); /* switch back to index mode */
+	for (i = 0; i <= 0x14; i++)
+		i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+	I915_READ8(st01); /* switch back to index mode */
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+	I915_READ8(st01);
+
+	/* VGA color palette registers */
+	I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+	/* DACWX automatically increments during write */
+	I915_WRITE8(VGA_DACWX, 0);
+	/* Write 3 bytes of color data to each index */
+	for (i = 0; i < 256 * 3; i++)
+		I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
+
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+	/* Display arbitration control */
+	dev_priv->saveDSPARB = I915_READ(DSPARB);
+
+	/* Pipe & plane A info */
+	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+	dev_priv->saveFPA0 = I915_READ(FPA0);
+	dev_priv->saveFPA1 = I915_READ(FPA1);
+	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+	if (IS_I965G(dev)) {
+		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+	}
+	i915_save_palette(dev, PIPE_A);
+	dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+
+	/* Pipe & plane B info */
+	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+	dev_priv->saveFPB0 = I915_READ(FPB0);
+	dev_priv->saveFPB1 = I915_READ(FPB1);
+	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+	if (IS_I965GM(dev) || IS_GM45(dev)) {
+		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+	}
+	i915_save_palette(dev, PIPE_B);
+	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+	/* CRT state */
+	dev_priv->saveADPA = I915_READ(ADPA);
+
+	/* LVDS state */
+	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+	if (IS_I965G(dev))
+		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		dev_priv->saveLVDS = I915_READ(LVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+	dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+	dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+	dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+
+	/* FIXME: save TV & SDVO state */
+
+	/* FBC state */
+	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+	/* Interrupt state */
+	dev_priv->saveIIR = I915_READ(IIR);
+	dev_priv->saveIER = I915_READ(IER);
+	dev_priv->saveIMR = I915_READ(IMR);
+
+	/* VGA state */
+	dev_priv->saveVGA0 = I915_READ(VGA0);
+	dev_priv->saveVGA1 = I915_READ(VGA1);
+	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+	/* Clock gating state */
+	dev_priv->saveD_STATE = I915_READ(D_STATE);
+	dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
+
+	/* Cache mode state */
+	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+	/* Memory Arbitration state */
+	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+	/* Scratch space */
+	for (i = 0; i < 16; i++) {
+		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+	}
+	for (i = 0; i < 3; i++)
+		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+	i915_save_vga(dev);
+
+	return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
+	/* Pipe & plane A info */
+	/* Prime the clock */
+	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+			   ~DPLL_VCO_ENABLE);
+		DRM_UDELAY(150);
+	}
+	I915_WRITE(FPA0, dev_priv->saveFPA0);
+	I915_WRITE(FPA1, dev_priv->saveFPA1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+	DRM_UDELAY(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+	DRM_UDELAY(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+	/* Restore plane info */
+	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+	}
+
+	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+	i915_restore_palette(dev, PIPE_A);
+	/* Enable the plane */
+	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+
+	/* Pipe & plane B info */
+	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+			   ~DPLL_VCO_ENABLE);
+		DRM_UDELAY(150);
+	}
+	I915_WRITE(FPB0, dev_priv->saveFPB0);
+	I915_WRITE(FPB1, dev_priv->saveFPB1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+	DRM_UDELAY(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+	DRM_UDELAY(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+	/* Restore plane info */
+	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+	}
+
+	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+	i915_restore_palette(dev, PIPE_B);
+	/* Enable the plane */
+	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+
+	/* CRT state */
+	I915_WRITE(ADPA, dev_priv->saveADPA);
+
+	/* LVDS state */
+	if (IS_I965G(dev))
+		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->saveLVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+	I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+	I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+	I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+	/* FIXME: restore TV & SDVO state */
+
+	/* FBC info */
+	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+	/* VGA state */
+	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+	I915_WRITE(VGA0, dev_priv->saveVGA0);
+	I915_WRITE(VGA1, dev_priv->saveVGA1);
+	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+	DRM_UDELAY(150);
+
+	/* Clock gating state */
+	I915_WRITE(D_STATE, dev_priv->saveD_STATE);
+	I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
+
+	/* Cache mode state */
+	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+	/* Memory arbitration state */
+	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
+	for (i = 0; i < 16; i++) {
+		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
+		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+	}
+	for (i = 0; i < 3; i++)
+		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+	i915_restore_vga(dev);
+
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 5572939fc7d1..97ee566ef749 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
-	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL,
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
 	.load = mga_driver_load,
 	.unload = mga_driver_unload,
 	.lastclose = mga_driver_lastclose,
 	.dma_quiescent = mga_driver_dma_quiescent,
 	.device_is_agp = mga_driver_device_is_agp,
-	.vblank_wait = mga_driver_vblank_wait,
+	.get_vblank_counter = mga_get_vblank_counter,
+	.enable_vblank = mga_enable_vblank,
+	.disable_vblank = mga_disable_vblank,
 	.irq_preinstall = mga_driver_irq_preinstall,
 	.irq_postinstall = mga_driver_irq_postinstall,
 	.irq_uninstall = mga_driver_irq_uninstall,
@@ -64,20 +65,20 @@ static struct drm_driver driver = {
 	.ioctls = mga_ioctls,
 	.dma_ioctl = mga_dma_buffers,
 	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
-		 .compat_ioctl = mga_compat_ioctl,
+		.compat_ioctl = mga_compat_ioctl,
 #endif
-		 },
+	},
 	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
 	},
 
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index f6ebd24bd587..88257c276eb9 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
 	u32 clear_cmd;
 	u32 maccess;
 
+	atomic_t vbl_received;          /**< Number of vblanks received. */
 	wait_queue_head_t fence_queue;
 	atomic_t last_fence_retired;
 	u32 next_fence_to_post;
@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
 extern int mga_warp_init(drm_mga_private_t * dev_priv);
 
 				/* mga_irq.c */
+extern int mga_enable_vblank(struct drm_device *dev, int crtc);
+extern void mga_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
 extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
 extern void mga_driver_irq_preinstall(struct drm_device * dev);
-extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern int mga_driver_irq_postinstall(struct drm_device *dev);
 extern void mga_driver_irq_uninstall(struct drm_device * dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 9302cb8f0f83..bab42f41188b 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -1,5 +1,6 @@
 /* mga_irq.c -- IRQ handling for the MGA -*- linux-c -*-
- *
+ */
+/*
  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
  *
  * The Weather Channel (TM) funded Tungsten Graphics to develop the
@@ -35,6 +36,18 @@
 #include "mga_drm.h"
 #include "mga_drv.h"
 
+u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	const drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
+
 irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -47,9 +60,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 	/* VBLANK interrupt */
 	if (status & MGA_VLINEPEN) {
 		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
-		atomic_inc(&dev->vbl_received);
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
 
@@ -58,6 +70,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
 		const u32 prim_end = MGA_READ(MGA_PRIMEND);
 
+
 		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
 
 		/* In addition to clearing the interrupt-pending bit, we
@@ -72,28 +85,39 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 		handled = 1;
 	}
 
-	if (handled) {
+	if (handled)
 		return IRQ_HANDLED;
-	}
 	return IRQ_NONE;
 }
 
-int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_enable_vblank(struct drm_device *dev, int crtc)
 {
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
-		      - *sequence) <= (1 << 23)));
+	if (crtc != 0) {
+		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+			  crtc);
+		return 0;
+	}
 
-	*sequence = cur_vblank;
+	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	return 0;
+}
 
-	return ret;
+
+void mga_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0) {
+		DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+			  crtc);
+	}
+
+	/* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
+	 * a nice hardware counter that tracks the number of refreshes when
+	 * the interrupt is disabled, and the kernel doesn't know the refresh
+	 * rate to calculate an estimate.
+	 */
+	/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
 }
 
 int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@@ -125,14 +149,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
 	MGA_WRITE(MGA_ICLEAR, ~0);
 }
 
-void mga_driver_irq_postinstall(struct drm_device * dev)
+int mga_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	int ret;
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret)
+		return ret;
 
 	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
 
-	/* Turn on vertical blank interrupt and soft trap interrupt. */
-	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
+	 * in mga_enable_vblank.
+	 */
+	MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
+	return 0;
 }
 
 void mga_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index d3f8aade07b3..b710fab21cb3 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1022,7 +1022,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
 
 	switch (param->param) {
 	case MGA_PARAM_IRQ_NR:
-		value = dev->irq;
+		value = drm_dev_to_irq(dev);
 		break;
 	case MGA_PARAM_CARD_TYPE:
 		value = dev_priv->chipset;
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 6108e7587e12..3265d53ba91f 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL,
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
 	.preclose = r128_driver_preclose,
 	.lastclose = r128_driver_lastclose,
-	.vblank_wait = r128_driver_vblank_wait,
+	.get_vblank_counter = r128_get_vblank_counter,
+	.enable_vblank = r128_enable_vblank,
+	.disable_vblank = r128_disable_vblank,
 	.irq_preinstall = r128_driver_irq_preinstall,
 	.irq_postinstall = r128_driver_irq_postinstall,
 	.irq_uninstall = r128_driver_irq_uninstall,
@@ -59,21 +60,20 @@ static struct drm_driver driver = {
 	.ioctls = r128_ioctls,
 	.dma_ioctl = r128_cce_buffers,
 	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
-		 .compat_ioctl = r128_compat_ioctl,
+		.compat_ioctl = r128_compat_ioctl,
 #endif
 	},
-
 	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
 	},
 
 	.name = DRIVER_NAME,
@@ -87,6 +87,7 @@ static struct drm_driver driver = {
 static int __init r128_init(void)
 {
 	driver.num_ioctls = r128_max_ioctl;
+
 	return drm_init(&driver);
 }
 
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 011105e51ac6..5898b274279d 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -29,7 +29,7 @@
  *    Rickard E. (Rik) Faith <faith@valinux.com>
  *    Kevin E. Martin <martin@valinux.com>
  *    Gareth Hughes <gareth@valinux.com>
- *    Michel Dänzer <daenzerm@student.ethz.ch>
+ *    Michel Dänzer <daenzerm@student.ethz.ch>
  */
 
 #ifndef __R128_DRV_H__
@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
 	u32 crtc_offset;
 	u32 crtc_offset_cntl;
 
+	atomic_t vbl_received;
+
 	u32 color_fmt;
 	unsigned int front_offset;
 	unsigned int front_pitch;
@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
 extern int r128_do_cleanup_cce(struct drm_device * dev);
 
-extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
-
+extern int r128_enable_vblank(struct drm_device *dev, int crtc);
+extern void r128_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
 extern void r128_driver_irq_preinstall(struct drm_device * dev);
-extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern int r128_driver_irq_postinstall(struct drm_device *dev);
 extern void r128_driver_irq_uninstall(struct drm_device * dev);
 extern void r128_driver_lastclose(struct drm_device * dev);
 extern void r128_driver_preclose(struct drm_device * dev,
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index c76fdca7662d..d7349012a680 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -35,6 +35,16 @@
 #include "r128_drm.h"
 #include "r128_drv.h"
 
+u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	const drm_r128_private_t *dev_priv = dev->dev_private;
+
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
 	/* VBLANK interrupt */
 	if (status & R128_CRTC_VBLANK_INT) {
 		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-		atomic_inc(&dev->vbl_received);
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
 }
 
-int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int r128_enable_vblank(struct drm_device *dev, int crtc)
 {
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_r128_private_t *dev_priv = dev->dev_private;
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
-		      - *sequence) <= (1 << 23)));
+	if (crtc != 0) {
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+		return -EINVAL;
+	}
 
-	*sequence = cur_vblank;
+	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return 0;
+}
+
+void r128_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0)
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
 
-	return ret;
+	/*
+	 * FIXME: implement proper interrupt disable by using the vblank
+	 * counter register (if available)
+	 *
+	 * R128_WRITE(R128_GEN_INT_CNTL,
+	 *            R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
+	 */
 }
 
 void r128_driver_irq_preinstall(struct drm_device * dev)
@@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
 	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
 }
 
-void r128_driver_irq_postinstall(struct drm_device * dev)
+int r128_driver_irq_postinstall(struct drm_device *dev)
 {
-	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
-
-	/* Turn on VBL interrupt */
-	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return drm_vblank_init(dev, 1);
 }
 
 void r128_driver_irq_uninstall(struct drm_device * dev)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 51a9afce7b9b..f7a5b5740764 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1629,7 +1629,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
 
 	switch (param->param) {
 	case R128_PARAM_IRQ_NR:
-		value = dev->irq;
+		value = drm_dev_to_irq(dev);
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 248ab4a7d39f..59a2132a8f57 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -71,7 +71,8 @@ static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 
 static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		return RS690_READ_MCIND(dev_priv, addr);
 	else
 		return RS480_READ_MCIND(dev_priv, addr);
@@ -82,7 +83,8 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
 
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
@@ -94,7 +96,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
@@ -106,7 +109,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
@@ -122,15 +126,17 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
 		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
 		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
 		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
 		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
 	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
 		R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
 		R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
 		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
-		RADEON_WRITE(RS480_AGP_BASE_2, 0);
+		RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
 	} else {
 		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
 		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
@@ -347,6 +353,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
 		DRM_INFO("Loading R300 Microcode\n");
 		for (i = 0; i < 256; i++) {
@@ -356,6 +363,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
 				     R300_cp_microcode[i][0]);
 		}
 	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
 		DRM_INFO("Loading R400 Microcode\n");
 		for (i = 0; i < 256; i++) {
@@ -364,8 +372,9 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
 			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
 				     R420_cp_microcode[i][0]);
 		}
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
-		DRM_INFO("Loading RS690 Microcode\n");
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
 				     RS690_cp_microcode[i][1]);
@@ -626,8 +635,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 		     dev_priv->ring.size_l2qw);
 #endif
 
-	/* Start with assuming that writeback doesn't work */
-	dev_priv->writeback_works = 0;
 
 	/* Initialize the scratch register pointer.  This will cause
 	 * the scratch register values to be written out to memory
@@ -646,8 +653,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
 
 	/* Turn on bus mastering */
-	tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
-	RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		/* rs400, rs690/rs740 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) {
+		/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} /* PCIE cards appear not to need this */
 
 	dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
 	RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
@@ -674,6 +691,9 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
 {
 	u32 tmp;
 
+	/* Start with assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
 	/* Writeback doesn't seem to work everywhere, test it here and possibly
 	 * enable it if it appears to work
 	 */
@@ -719,7 +739,8 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
 			  dev_priv->gart_size);
 
 		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
-		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
 							     RS690_BLOCK_GFX_D3_EN));
 		else
@@ -812,6 +833,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
 	u32 tmp;
 
 	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
 	    (dev_priv->flags & RADEON_IS_IGPGART)) {
 		radeon_set_igpgart(dev_priv, on);
 		return;
@@ -1286,7 +1308,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
 	radeon_cp_init_ring_buffer(dev, dev_priv);
 
 	radeon_do_engine_reset(dev);
-	radeon_enable_interrupt(dev);
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
 
 	DRM_DEBUG("radeon_do_resume_cp() complete\n");
 
@@ -1708,6 +1730,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	case CHIP_R300:
 	case CHIP_R350:
 	case CHIP_R420:
+	case CHIP_R423:
 	case CHIP_RV410:
 	case CHIP_RV515:
 	case CHIP_R520:
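With RS740 added, the same (dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_X test now appears several times per function above. Not part of this patch, but a hypothetical helper would collapse the pattern:

static inline int radeon_chip_is(drm_radeon_private_t *dev_priv, int family)
{
	return (dev_priv->flags & RADEON_FAMILY_MASK) == family;
}

/* e.g.:
 *	if (radeon_chip_is(dev_priv, CHIP_RS690) ||
 *	    radeon_chip_is(dev_priv, CHIP_RS740))
 *		return RS690_READ_MCIND(dev_priv, addr);
 */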
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 637bd7faf132..71af746a4e47 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -52,6 +52,28 @@ static int dri_library_name(struct drm_device *dev, char *buf)
 		        "r300"));
 }
 
+static int radeon_suspend(struct drm_device *dev, pm_message_t state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+	return 0;
+}
+
+static int radeon_resume(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	/* Restore interrupt registers */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	return 0;
+}
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -59,8 +81,7 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
 	.load = radeon_driver_load,
 	.firstopen = radeon_driver_firstopen,
@@ -69,8 +90,11 @@ static struct drm_driver driver = {
 	.postclose = radeon_driver_postclose,
 	.lastclose = radeon_driver_lastclose,
 	.unload = radeon_driver_unload,
-	.vblank_wait = radeon_driver_vblank_wait,
-	.vblank_wait2 = radeon_driver_vblank_wait2,
+	.suspend = radeon_suspend,
+	.resume = radeon_resume,
+	.get_vblank_counter = radeon_get_vblank_counter,
+	.enable_vblank = radeon_enable_vblank,
+	.disable_vblank = radeon_disable_vblank,
 	.dri_library_name = dri_library_name,
 	.irq_preinstall = radeon_driver_irq_preinstall,
 	.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 099381693175..4dbb813910c3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -122,9 +122,12 @@ enum radeon_family {
 	CHIP_RV350,
 	CHIP_RV380,
 	CHIP_R420,
+	CHIP_R423,
 	CHIP_RV410,
+	CHIP_RS400,
 	CHIP_RS480,
 	CHIP_RS690,
+	CHIP_RS740,
 	CHIP_RV515,
 	CHIP_R520,
 	CHIP_RV530,
@@ -378,17 +381,17 @@ extern void radeon_mem_release(struct drm_file *file_priv,
 			       struct mem_block *heap);
 
 				/* radeon_irq.c */
+extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
 extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
 
 extern void radeon_do_release(struct drm_device * dev);
-extern int radeon_driver_vblank_wait(struct drm_device * dev,
-				     unsigned int *sequence);
-extern int radeon_driver_vblank_wait2(struct drm_device * dev,
-				      unsigned int *sequence);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
 extern void radeon_driver_irq_preinstall(struct drm_device * dev);
-extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device *dev);
 extern void radeon_driver_irq_uninstall(struct drm_device * dev);
 extern void radeon_enable_interrupt(struct drm_device *dev);
 extern int radeon_vblank_crtc_get(struct drm_device *dev);
@@ -397,19 +400,22 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
 extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
 extern int radeon_driver_unload(struct drm_device *dev);
 extern int radeon_driver_firstopen(struct drm_device *dev);
-extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
-extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
+extern void radeon_driver_preclose(struct drm_device *dev,
+				   struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device *dev,
+				    struct drm_file *file_priv);
 extern void radeon_driver_lastclose(struct drm_device * dev);
-extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
+extern int radeon_driver_open(struct drm_device *dev,
+			      struct drm_file *file_priv);
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg);
 
 /* r300_cmdbuf.c */
 extern void r300_init_reg_flags(struct drm_device *dev);
 
-extern int r300_do_cp_cmdbuf(struct drm_device * dev,
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
 			     struct drm_file *file_priv,
-			     drm_radeon_kcmd_buffer_t * cmdbuf);
+			     drm_radeon_kcmd_buffer_t *cmdbuf);
 
 /* Flags for stats.boxes
  */
@@ -434,8 +440,31 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
 #	define RADEON_SCISSOR_2_ENABLE		(1 << 30)
 
+/*
+ * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
+ * don't have an explicit bus mastering disable bit.  It's handled
+ * by the PCI D-states.  PMI_BM_DIS disables D-state bus master
+ * handling, not bus mastering itself.
+ */
 #define RADEON_BUS_CNTL			0x0030
+/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
 #	define RADEON_BUS_MASTER_DIS		(1 << 6)
+/* rs400, rs690/rs740 */
+#	define RS400_BUS_MASTER_DIS		(1 << 14)
+#	define RS400_MSI_REARM		        (1 << 20)
+/* see RS480_MSI_REARM in AIC_CNTL for rs480 */
+
+#define RADEON_BUS_CNTL1		0x0034
+#	define RADEON_PMI_BM_DIS		(1 << 2)
+#	define RADEON_PMI_INT_DIS		(1 << 3)
+
+#define RV370_BUS_CNTL			0x004c
+#	define RV370_PMI_BM_DIS		        (1 << 5)
+#	define RV370_PMI_INT_DIS		(1 << 6)
+
+#define RADEON_MSI_REARM_EN		0x0160
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#	define RV370_MSI_REARM_EN		(1 << 0)
 
 #define RADEON_CLOCK_CNTL_DATA		0x000c
 #	define RADEON_PLL_WR_EN			(1 << 7)
@@ -623,6 +652,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #	define RADEON_SW_INT_TEST		(1 << 25)
 #	define RADEON_SW_INT_TEST_ACK		(1 << 25)
 #	define RADEON_SW_INT_FIRE		(1 << 26)
+#       define R500_DISPLAY_INT_STATUS          (1 << 0)
 
 #define RADEON_HOST_PATH_CNTL		0x0130
 #	define RADEON_HDP_SOFT_RESET		(1 << 26)
@@ -907,6 +937,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 
 #define RADEON_AIC_CNTL			0x01d0
 #	define RADEON_PCIGART_TRANSLATE_EN	(1 << 0)
+#	define RS480_MSI_REARM	                (1 << 3)
 #define RADEON_AIC_STAT			0x01d4
 #define RADEON_AIC_PT_BASE		0x01d8
 #define RADEON_AIC_LO_ADDR		0x01dc
@@ -1116,6 +1147,9 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 
 #define R200_VAP_PVS_CNTL_1               0x22D0
 
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
 #define R500_D1CRTC_STATUS 0x609c
 #define R500_D2CRTC_STATUS 0x689c
 #define R500_CRTC_V_BLANK (1<<0)
@@ -1200,7 +1234,8 @@ do {								\
 
 #define IGP_WRITE_MCIND(addr, val)				\
 do {									\
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||   \
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))      \
 		RS690_WRITE_MCIND(addr, val);				\
 	else								\
 		RS480_WRITE_MCIND(addr, val);				\
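For readers new to these parts: MC (memory controller) registers on the IGP chips sit behind an index/data register pair rather than plain MMIO, and the IGP_WRITE_MCIND() macro above (with its IGP_READ_MCIND() counterpart in radeon_cp.c) merely routes to the RS690- or RS480-style pair by family. A usage sketch, mirroring the GART setup in radeon_cp.c (dev_priv must be in scope, since the macro expands references to it):

u32 misc;

misc = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, misc | RS480_GART_INDEX_REG_EN);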
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index ee40d197deb7..5079f7054a2f 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -27,7 +27,7 @@
  *
  * Authors:
  *    Keith Whitwell <keith@tungstengraphics.com>
- *    Michel Dänzer <michel@daenzer.net>
+ *    Michel Dänzer <michel@daenzer.net>
  */
 
 #include "drmP.h"
@@ -35,12 +35,128 @@
 #include "radeon_drm.h"
 #include "radeon_drv.h"
 
-static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
-					      u32 mask)
+void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
 {
-	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->irq_enable_reg |= mask;
+	else
+		dev_priv->irq_enable_reg &= ~mask;
+
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+}
+
+static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->r500_disp_irq_reg |= mask;
+	else
+		dev_priv->r500_disp_irq_reg &= ~mask;
+
+	RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+}
+
+int radeon_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void radeon_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	}
+}
+
+static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+{
+	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
+	u32 irq_mask = RADEON_SW_INT_TEST;
+
+	*r500_disp_int = 0;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		/* vbl interrupts in a different place */
+
+		if (irqs & R500_DISPLAY_INT_STATUS) {
+			/* if a display interrupt */
+			u32 disp_irq;
+
+			disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
+
+			*r500_disp_int = disp_irq;
+			if (disp_irq & R500_D1_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+			if (disp_irq & R500_D2_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+		}
+		irq_mask |= R500_DISPLAY_INT_STATUS;
+	} else
+		irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
+
+	irqs &=	irq_mask;
+
 	if (irqs)
 		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+
 	return irqs;
 }
 
@@ -68,44 +184,33 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
 	u32 stat;
+	u32 r500_disp_int;
 
 	/* Only consider the bits we're interested in - others could be used
 	 * outside the DRM
 	 */
-	stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-						  RADEON_CRTC_VBLANK_STAT |
-						  RADEON_CRTC2_VBLANK_STAT));
+	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
 	if (!stat)
 		return IRQ_NONE;
 
 	stat &= dev_priv->irq_enable_reg;
 
 	/* SW interrupt */
-	if (stat & RADEON_SW_INT_TEST) {
+	if (stat & RADEON_SW_INT_TEST)
 		DRM_WAKEUP(&dev_priv->swi_queue);
-	}
 
 	/* VBLANK interrupt */
-	if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
-		int vblank_crtc = dev_priv->vblank_crtc;
-
-		if ((vblank_crtc &
-		     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
-		    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
-			if (stat & RADEON_CRTC_VBLANK_STAT)
-				atomic_inc(&dev->vbl_received);
-			if (stat & RADEON_CRTC2_VBLANK_STAT)
-				atomic_inc(&dev->vbl_received2);
-		} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
-			   (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
-			   ((stat & RADEON_CRTC2_VBLANK_STAT) &&
-			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
-			atomic_inc(&dev->vbl_received);
-
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 0);
+		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 1);
+	} else {
+		if (stat & RADEON_CRTC_VBLANK_STAT)
+			drm_handle_vblank(dev, 0);
+		if (stat & RADEON_CRTC2_VBLANK_STAT)
+			drm_handle_vblank(dev, 1);
 	}
-
 	return IRQ_HANDLED;
 }
 
@@ -144,54 +249,31 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
 	return ret;
 }
 
-static int radeon_driver_vblank_do_wait(struct drm_device * dev,
-					unsigned int *sequence, int crtc)
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
 {
-	drm_radeon_private_t *dev_priv =
-	    (drm_radeon_private_t *) dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
-	int ack = 0;
-	atomic_t *counter;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	if (crtc == DRM_RADEON_VBLANK_CRTC1) {
-		counter = &dev->vbl_received;
-		ack |= RADEON_CRTC_VBLANK_STAT;
-	} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
-		counter = &dev->vbl_received2;
-		ack |= RADEON_CRTC2_VBLANK_STAT;
-	} else
+	if (crtc < 0 || crtc > 1) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
 		return -EINVAL;
+	}
 
-	radeon_acknowledge_irqs(dev_priv, ack);
-
-	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
-
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(counter))
-		      - *sequence) <= (1 << 23)));
-
-	*sequence = cur_vblank;
-
-	return ret;
-}
-
-int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
-	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
-}
-
-int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
-	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		if (crtc == 0)
+			return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
+		else
+			return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
+	} else {
+		if (crtc == 0)
+			return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
+		else
+			return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
+	}
 }
 
 /* Needs the lock as it touches the ring.
@@ -234,46 +316,41 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
 	return radeon_wait_irq(dev, irqwait->irq_seq);
 }
 
-void radeon_enable_interrupt(struct drm_device *dev)
-{
-	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-
-	dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
-	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
-		dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
-
-	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
-		dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
-
-	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
-	dev_priv->irq_enabled = 1;
-}
-
 /* drm_dma.h hooks
 */
 void radeon_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
+	u32 dummy;
 
 	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
 
 	/* Clear bits if they're already high */
-	radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-					   RADEON_CRTC_VBLANK_STAT |
-					   RADEON_CRTC2_VBLANK_STAT));
+	radeon_acknowledge_irqs(dev_priv, &dummy);
 }
 
-void radeon_driver_irq_postinstall(struct drm_device * dev)
+int radeon_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
+	int ret;
 
 	atomic_set(&dev_priv->swi_emitted, 0);
 	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
 
-	radeon_enable_interrupt(dev);
+	ret = drm_vblank_init(dev, 2);
+	if (ret)
+		return ret;
+
+	dev->max_vblank_count = 0x001fffff;
+
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	return 0;
 }
 
 void radeon_driver_irq_uninstall(struct drm_device * dev)
@@ -285,6 +362,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 
 	dev_priv->irq_enabled = 0;
 
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	/* Disable *all* interrupts */
 	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
 }
@@ -293,18 +372,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 int radeon_vblank_crtc_get(struct drm_device *dev)
 {
 	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-	u32 flag;
-	u32 value;
-
-	flag = RADEON_READ(RADEON_GEN_INT_CNTL);
-	value = 0;
-
-	if (flag & RADEON_CRTC_VBLANK_MASK)
-		value |= DRM_RADEON_VBLANK_CRTC1;
 
-	if (flag & RADEON_CRTC2_VBLANK_MASK)
-		value |= DRM_RADEON_VBLANK_CRTC2;
-	return value;
+	return dev_priv->vblank_crtc;
 }
 
 int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
@@ -315,6 +384,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
 		return -EINVAL;
 	}
 	dev_priv->vblank_crtc = (unsigned int)value;
-	radeon_enable_interrupt(dev);
 	return 0;
 }
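radeon_get_vblank_counter() returns a 21-bit hardware frame count, which is why postinstall sets dev->max_vblank_count = 0x001fffff. The core then compensates for counter wraps between samples; a simplified sketch of that drm_irq.c logic (not a verbatim quote):

u32 cur = dev->driver->get_vblank_counter(dev, crtc);
u32 diff = cur - dev->last_vblank[crtc];

if (cur < dev->last_vblank[crtc])
	/* the hardware counter wrapped since we last sampled it */
	diff += dev->max_vblank_count;

atomic_add(diff, &dev->_vblank_count[crtc]);
dev->last_vblank[crtc] = cur;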
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 11c146b49211..5d7153fcc7b0 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -2997,7 +2997,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
 		value = GET_SCRATCH(2);
 		break;
 	case RADEON_PARAM_IRQ_NR:
-		value = dev->irq;
+		value = drm_dev_to_irq(dev);
 		break;
 	case RADEON_PARAM_GART_BASE:
 		value = dev_priv->gart_vm_start;
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index b3878770fce1..af22111397d8 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,7 +41,7 @@
 #define AGP_TYPE 1
 
 
-#if defined(CONFIG_FB_SIS)
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 /* fb management via fb device */
 
 #define SIS_MM_ALIGN_SHIFT 0
@@ -57,7 +57,7 @@ static void *sis_sman_mm_allocate(void *private, unsigned long size,
 	if (req.size == 0)
 		return NULL;
 	else
-		return (void *)~req.offset;
+		return (void *)(unsigned long)~req.offset;
 }
 
 static void sis_sman_mm_free(void *private, void *ref)
@@ -75,12 +75,12 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
 	return ~((unsigned long)ref);
 }
 
-#else /* CONFIG_FB_SIS */
+#else /* CONFIG_FB_SIS[_MODULE] */
 
 #define SIS_MM_ALIGN_SHIFT 4
 #define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
 
-#endif /* CONFIG_FB_SIS */
+#endif /* CONFIG_FB_SIS[_MODULE] */
 
 static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -89,7 +89,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS)
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 	{
 		struct drm_sman_mm sman_mm;
 		sman_mm.private = (void *)0xFFFFFFFF;
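The (void *)(unsigned long) change above is a 64-bit fix: the sisfb request fields are 32-bit, and casting a 32-bit integer straight to a pointer draws a "cast to pointer from integer of different size" warning on LP64 targets. Widening through unsigned long first is the usual idiom:

u32 key = ~req.offset;			/* 32-bit cookie from sisfb */
void *ref = (void *)(unsigned long)key;	/* widen first, then cast */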
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index 80c01cdfa37d..0993b441fc42 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
-	    DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	    DRIVER_IRQ_SHARED,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
 	.context_dtor = via_final_context,
-	.vblank_wait = via_driver_vblank_wait,
+	.get_vblank_counter = via_get_vblank_counter,
+	.enable_vblank = via_enable_vblank,
+	.disable_vblank = via_disable_vblank,
 	.irq_preinstall = via_driver_irq_preinstall,
 	.irq_postinstall = via_driver_irq_postinstall,
 	.irq_uninstall = via_driver_irq_uninstall,
@@ -59,17 +61,17 @@ static struct drm_driver driver = {
 	.get_reg_ofs = drm_core_get_reg_ofs,
 	.ioctls = via_ioctls,
 	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
-	},
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
+		},
 	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
 	},
 
 	.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 2daae81874cd..cafcb844a223 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -75,6 +75,7 @@ typedef struct drm_via_private {
 	struct timeval last_vblank;
 	int last_vblank_valid;
 	unsigned usec_per_vblank;
+	atomic_t vbl_received;
 	drm_via_state_t hc_state;
 	char pci_buf[VIA_PCI_BUF_SIZE];
 	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@@ -130,21 +131,24 @@ extern int via_init_context(struct drm_device * dev, int context);
 extern int via_final_context(struct drm_device * dev, int context);
 
 extern int via_do_cleanup_map(struct drm_device * dev);
-extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int via_enable_vblank(struct drm_device *dev, int crtc);
+extern void via_disable_vblank(struct drm_device *dev, int crtc);
 
 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
 extern void via_driver_irq_preinstall(struct drm_device * dev);
-extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern int via_driver_irq_postinstall(struct drm_device *dev);
 extern void via_driver_irq_uninstall(struct drm_device * dev);
 
 extern int via_dma_cleanup(struct drm_device * dev);
 extern void via_init_command_verifier(void);
 extern int via_driver_dma_quiescent(struct drm_device * dev);
-extern void via_init_futex(drm_via_private_t * dev_priv);
-extern void via_cleanup_futex(drm_via_private_t * dev_priv);
-extern void via_release_futex(drm_via_private_t * dev_priv, int context);
+extern void via_init_futex(drm_via_private_t *dev_priv);
+extern void via_cleanup_futex(drm_via_private_t *dev_priv);
+extern void via_release_futex(drm_via_private_t *dev_priv, int context);
 
-extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
+extern void via_reclaim_buffers_locked(struct drm_device *dev,
+				       struct drm_file *file_priv);
 extern void via_lastclose(struct drm_device *dev);
 
 extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c6bb978a1106..665d319b927b 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -43,7 +43,7 @@
 #define VIA_REG_INTERRUPT       0x200
 
 /* VIA_REG_INTERRUPT */
-#define VIA_IRQ_GLOBAL          (1 << 31)
+#define VIA_IRQ_GLOBAL	  (1 << 31)
 #define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
 #define VIA_IRQ_VBLANK_PENDING  (1 << 3)
 #define VIA_IRQ_HQV0_ENABLE     (1 << 11)
@@ -68,16 +68,15 @@
 
 static maskarray_t via_pro_group_a_irqs[] = {
 	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
-	 0x00000000},
+	 0x00000000 },
 	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
-	 0x00000000},
+	 0x00000000 },
 	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 };
-static int via_num_pro_group_a =
-    sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
 static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
 
 static maskarray_t via_unichrome_irqs[] = {
@@ -86,14 +85,24 @@ static maskarray_t via_unichrome_irqs[] = {
 	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
 };
-static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
 static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
 
+
 static unsigned time_diff(struct timeval *now, struct timeval *then)
 {
 	return (now->tv_usec >= then->tv_usec) ?
-	    now->tv_usec - then->tv_usec :
-	    1000000 - (then->tv_usec - now->tv_usec);
+		now->tv_usec - then->tv_usec :
+		1000000 - (then->tv_usec - now->tv_usec);
+}
+
+u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
 }
 
 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -108,23 +117,22 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	if (status & VIA_IRQ_VBLANK_PENDING) {
-		atomic_inc(&dev->vbl_received);
-		if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
+		atomic_inc(&dev_priv->vbl_received);
+		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
 			do_gettimeofday(&cur_vblank);
 			if (dev_priv->last_vblank_valid) {
 				dev_priv->usec_per_vblank =
-				    time_diff(&cur_vblank,
-					      &dev_priv->last_vblank) >> 4;
+					time_diff(&cur_vblank,
+						  &dev_priv->last_vblank) >> 4;
 			}
 			dev_priv->last_vblank = cur_vblank;
 			dev_priv->last_vblank_valid = 1;
 		}
-		if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
+		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
 			DRM_DEBUG("US per vblank is: %u\n",
 				  dev_priv->usec_per_vblank);
 		}
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
 
@@ -145,6 +153,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 	/* Acknowledge interrupts */
 	VIA_WRITE(VIA_REG_INTERRUPT, status);
 
+
 	if (handled)
 		return IRQ_HANDLED;
 	else
@@ -163,31 +172,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
 	}
 }
 
-int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int via_enable_vblank(struct drm_device *dev, int crtc)
 {
-	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_via_private_t *dev_priv = dev->dev_private;
+	u32 status;
 
-	DRM_DEBUG("\n");
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
+	if (crtc != 0) {
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
 		return -EINVAL;
 	}
 
-	viadrv_acknowledge_irqs(dev_priv);
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE);
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
+	return 0;
+}
+
+void via_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
 
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received)) -
-		      *sequence) <= (1 << 23)));
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
 
-	*sequence = cur_vblank;
-	return ret;
+	if (crtc != 0)
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
 }
 
 static int
@@ -239,6 +251,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
 	return ret;
 }
 
+
 /*
  * drm_dma.h hooks
  */
@@ -292,23 +305,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
 	}
 }
 
-void via_driver_irq_postinstall(struct drm_device * dev)
+int via_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
+	int ret;
 
-	DRM_DEBUG("\n");
-	if (dev_priv) {
-		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
-			  | dev_priv->irq_enable_mask);
+	DRM_DEBUG("via_driver_irq_postinstall\n");
+	if (!dev_priv)
+		return -EINVAL;
 
-		/* Some magic, oh for some data sheets ! */
+	ret = drm_vblank_init(dev, 1);
+	if (ret)
+		return ret;
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+		  | dev_priv->irq_enable_mask);
 
-		VIA_WRITE8(0x83d4, 0x11);
-		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+	/* Some magic, oh for some data sheets ! */
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
 
-	}
+	return 0;
 }
 
 void via_driver_irq_uninstall(struct drm_device * dev)
@@ -339,9 +354,6 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
 	int force_sequence;
 
-	if (!dev->irq)
-		return -EINVAL;
-
 	if (irqwait->request.irq >= dev_priv->num_irqs) {
 		DRM_ERROR("Trying to wait on unknown irq %d\n",
 			  irqwait->request.irq);
@@ -352,7 +364,8 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
 	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
 	case VIA_IRQ_RELATIVE:
-		irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
+		irqwait->request.sequence +=
+			atomic_read(&cur_irq->irq_received);
 		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
 	case VIA_IRQ_ABSOLUTE:
 		break;
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index e64094916e4f..f694cb5ededc 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -93,8 +93,7 @@ int via_final_context(struct drm_device *dev, int context)
 	/* Last context, perform cleanup */
 	if (dev->ctx_count == 1 && dev->dev_private) {
 		DRM_DEBUG("Last Context\n");
-		if (dev->irq)
-			drm_irq_uninstall(dev);
+		drm_irq_uninstall(dev);
 		via_cleanup_futex(dev_priv);
 		via_do_cleanup_map(dev);
 	}
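Dropping the if (dev->irq) guard above is safe because drm_irq_uninstall() already refuses to run when no handler is installed. Roughly, the core side looks like this (a simplified sketch of drm_irq.c, not a verbatim quote):

int drm_irq_uninstall(struct drm_device *dev)
{
	if (!dev->irq_enabled)
		return -EINVAL;		/* nothing installed: harmless no-op */

	dev->irq_enabled = 0;
	dev->driver->irq_uninstall(dev);
	free_irq(drm_dev_to_irq(dev), dev);
	return 0;
}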
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 38d3c6b8276a..f46ba4b57da4 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,7 +36,6 @@
 #ifndef _DRM_H_
 #define _DRM_H_
 
-#if defined(__linux__)
 #if defined(__KERNEL__)
 #endif
 #include <asm/ioctl.h>		/* For _IO* macros */
@@ -46,22 +45,6 @@
 #define DRM_IOC_WRITE		_IOC_WRITE
 #define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
-#if defined(__FreeBSD__) && defined(IN_MODULE)
-/* Prevent name collision when including sys/ioccom.h */
-#undef ioctl
-#include <sys/ioccom.h>
-#define ioctl(a,b,c)		xf86ioctl(a,b,c)
-#else
-#include <sys/ioccom.h>
-#endif				/* __FreeBSD__ && xf86ioctl */
-#define DRM_IOCTL_NR(n)		((n) & 0xff)
-#define DRM_IOC_VOID		IOC_VOID
-#define DRM_IOC_READ		IOC_OUT
-#define DRM_IOC_WRITE		IOC_IN
-#define DRM_IOC_READWRITE	IOC_INOUT
-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
-#endif
 
 #define DRM_MAJOR       226
 #define DRM_MAX_MINOR   15
@@ -471,6 +454,7 @@ struct drm_irq_busid {
 enum drm_vblank_seq_type {
 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
+	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
 	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
@@ -503,6 +487,19 @@ union drm_wait_vblank {
 	struct drm_wait_vblank_reply reply;
 };
 
+#define _DRM_PRE_MODESET 1
+#define _DRM_POST_MODESET 2
+
+/**
+ * DRM_IOCTL_MODESET_CTL ioctl argument type
+ *
+ * \sa drmModesetCtl().
+ */
+struct drm_modeset_ctl {
+	uint32_t crtc;
+	uint32_t cmd;
+};
+
 /**
  * DRM_IOCTL_AGP_ENABLE ioctl argument type.
  *
@@ -573,6 +570,34 @@ struct drm_set_version {
 	int drm_dd_minor;
 };
 
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct drm_gem_close {
+	/** Handle of the object to be closed. */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct drm_gem_flink {
+	/** Handle for the object being named */
+	uint32_t handle;
+
+	/** Returned global name */
+	uint32_t name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct drm_gem_open {
+	/** Name of object being opened */
+	uint32_t name;
+
+	/** Returned handle for the object */
+	uint32_t handle;
+
+	/** Returned size of the object */
+	uint64_t size;
+};
+
 #define DRM_IOCTL_BASE			'd'
 #define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
 #define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
@@ -587,6 +612,10 @@ struct drm_set_version {
 #define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
+#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
+#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
 
 #define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
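A hedged userspace sketch of the object-sharing flow the new GEM ioctls enable (error handling omitted; fd and fd2 are assumed to be open DRM file descriptors, handle an existing GEM handle on fd):

#include <sys/ioctl.h>

struct drm_gem_flink flink = { .handle = handle };
struct drm_gem_open op = { 0 };

/* publish a global name for the object behind our handle... */
ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);

/* ...then open it from another client, which gets its own handle */
op.name = flink.name;
ioctl(fd2, DRM_IOCTL_GEM_OPEN, &op);
/* op.handle and op.size now describe the shared object */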
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1c1b13e29223..59c796b46ee7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -104,6 +104,7 @@ struct drm_device;
 #define DRIVER_DMA_QUEUE   0x200
 #define DRIVER_FB_DMA      0x400
 #define DRIVER_IRQ_VBL2    0x800
+#define DRIVER_GEM         0x1000
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -387,6 +388,10 @@ struct drm_file {
 	struct drm_minor *minor;
 	int remove_auth_on_close;
 	unsigned long lock_count;
+	/** Mapping of mm object handles to object pointers. */
+	struct idr object_idr;
+	/** Lock for synchronization of access to object_idr. */
+	spinlock_t table_lock;
 	struct file *filp;
 	void *driver_priv;
 };
@@ -558,6 +563,56 @@ struct drm_ati_pcigart_info {
 };
 
 /**
+ * This structure defines the generic GEM buffer object, which the DRM
+ * uses to back its memory-managed buffers.
+ */
+struct drm_gem_object {
+	/** Reference count of this object */
+	struct kref refcount;
+
+	/** Handle count of this object. Each handle also holds a reference */
+	struct kref handlecount;
+
+	/** Related drm device */
+	struct drm_device *dev;
+
+	/** File representing the shmem storage */
+	struct file *filp;
+
+	/**
+	 * Size of the object, in bytes.  Immutable over the object's
+	 * lifetime.
+	 */
+	size_t size;
+
+	/**
+	 * Global name for this object, starts at 1. 0 means unnamed.
+	 * Access is covered by the object_name_lock in the related drm_device
+	 */
+	int name;
+
+	/**
+	 * Memory domains. These monitor which caches contain read/write data
+	 * related to the object. When transitioning from one set of domains
+	 * to another, the driver is called to ensure that caches are suitably
+	 * flushed and invalidated
+	 */
+	uint32_t read_domains;
+	uint32_t write_domain;
+
+	/**
+	 * While validating an exec operation, the
+	 * new read/write domain values are computed here.
+	 * They will be transferred to the above values
+	 * at the point that any cache flushing occurs
+	 */
+	uint32_t pending_read_domains;
+	uint32_t pending_write_domain;
+
+	void *driver_private;
+};
+
+/**
 * DRM driver structure. This structure represents the common code for
 * a family of cards. There will be one drm_device for each card present
  * in this family
@@ -580,11 +635,54 @@ struct drm_driver {
 	int (*kernel_context_switch) (struct drm_device *dev, int old,
 				      int new);
 	void (*kernel_context_switch_unlock) (struct drm_device *dev);
-	int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
-	int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
 	int (*dri_library_name) (struct drm_device *dev, char *buf);
 
 	/**
+	 * get_vblank_counter - get raw hardware vblank counter
+	 * @dev: DRM device
+	 * @crtc: counter to fetch
+	 *
+	 * Driver callback for fetching a raw hardware vblank counter
+	 * for @crtc.  If a device doesn't have a hardware counter, the
+	 * driver can simply return the value of drm_vblank_count and
+	 * make the enable_vblank() and disable_vblank() hooks into no-ops,
+	 * leaving interrupts enabled at all times.
+	 *
+	 * Wraparound handling and loss of events due to modesetting is dealt
+	 * with in the DRM core code.
+	 *
+	 * RETURNS
+	 * Raw vblank counter value.
+	 */
+	u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+	/**
+	 * enable_vblank - enable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to enable
+	 *
+	 * Enable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 *
+	 * RETURNS
+	 * Zero on success, appropriate errno if the given @crtc's vblank
+	 * interrupt cannot be enabled.
+	 */
+	int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
+	 * disable_vblank - disable vblank interrupt events
+	 * @dev: DRM device
+	 * @crtc: which irq to disable
+	 *
+	 * Disable vblank interrupts for @crtc.  If the device doesn't have
+	 * a hardware vblank counter, this routine should be a no-op, since
+	 * interrupts will have to stay on to keep the count accurate.
+	 */
+	void (*disable_vblank) (struct drm_device *dev, int crtc);
+
+	/**
 	 * Called by \c drm_device_is_agp.  Typically used to determine if a
 	 * card is really attached to AGP or not.
 	 *
@@ -601,7 +699,7 @@ struct drm_driver {
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
 	void (*irq_preinstall) (struct drm_device *dev);
-	void (*irq_postinstall) (struct drm_device *dev);
+	int (*irq_postinstall) (struct drm_device *dev);
 	void (*irq_uninstall) (struct drm_device *dev);
 	void (*reclaim_buffers) (struct drm_device *dev,
 				 struct drm_file * file_priv);
@@ -614,6 +712,18 @@ struct drm_driver {
 	void (*set_version) (struct drm_device *dev,
 			     struct drm_set_version *sv);
 
+	int (*proc_init)(struct drm_minor *minor);
+	void (*proc_cleanup)(struct drm_minor *minor);
+
+	/**
+	 * Driver-specific constructor for drm_gem_objects, to set up
+	 * obj->driver_private.
+	 *
+	 * Returns 0 on success.
+	 */
+	int (*gem_init_object) (struct drm_gem_object *obj);
+	void (*gem_free_object) (struct drm_gem_object *obj);
+
 	int major;
 	int minor;
 	int patchlevel;
@@ -714,7 +824,6 @@ struct drm_device {
 
 	/** \name Context support */
 	/*@{ */
-	int irq;			/**< Interrupt used by board */
 	int irq_enabled;		/**< True if irq handler is enabled */
 	__volatile__ long context_flag;	/**< Context swapping flag */
 	__volatile__ long interrupt_flag; /**< Interruption handler flag */
@@ -730,13 +839,28 @@ struct drm_device {
 	/** \name VBLANK IRQ support */
 	/*@{ */
 
-	wait_queue_head_t vbl_queue;	/**< VBLANK wait queue */
-	atomic_t vbl_received;
-	atomic_t vbl_received2;		/**< number of secondary VBLANK interrupts */
+	/*
+	 * At load time, disabling the vblank interrupt won't be allowed since
+	 * old clients may not call the modeset ioctl and therefore misbehave.
+	 * Once the modeset ioctl *has* been called though, we can safely
+	 * disable them when unused.
+	 */
+	int vblank_disable_allowed;
+
+	wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
+	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
 	spinlock_t vbl_lock;
-	struct list_head vbl_sigs;		/**< signal list to send on VBLANK */
-	struct list_head vbl_sigs2;	/**< signals to send on secondary VBLANK */
-	unsigned int vbl_pending;
+	struct list_head *vbl_sigs;	/**< signal list to send on VBLANK */
+	atomic_t vbl_signal_pending;    /* number of signals pending on all crtcs */
+	atomic_t *vblank_refcount;      /* number of users of vblank interrupts per crtc */
+	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
+					/* for wraparound handling */
+	int *vblank_enabled;            /* so we don't call enable more than
+					   once per disable */
+	int *vblank_inmodeset;          /* Display driver is setting mode */
+	struct timer_list vblank_disable_timer;
+
+	u32 max_vblank_count;           /**< size of vblank counter register */
 	spinlock_t tasklet_lock;	/**< For drm_locked_tasklet */
 	void (*locked_tasklet_func)(struct drm_device *dev);
 
@@ -757,6 +881,7 @@ struct drm_device {
 	struct pci_controller *hose;
 #endif
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
+	int num_crtcs;                  /**< Number of CRTCs on this device */
 	void *dev_private;		/**< device private data */
 	struct drm_sigdata sigdata;	   /**< For block_all_signals */
 	sigset_t sigmask;
@@ -771,8 +896,29 @@ struct drm_device {
 	spinlock_t drw_lock;
 	struct idr drw_idr;
 	/*@} */
+
+	/** \name GEM information */
+	/*@{ */
+	spinlock_t object_name_lock;
+	struct idr object_name_idr;
+	atomic_t object_count;
+	atomic_t object_memory;
+	atomic_t pin_count;
+	atomic_t pin_memory;
+	atomic_t gtt_count;
+	atomic_t gtt_memory;
+	uint32_t gtt_total;
+	uint32_t invalidate_domains;    /* domains pending invalidation */
+	uint32_t flush_domains;         /* domains pending flush */
+	/*@} */
+
 };
 
+static inline int drm_dev_to_irq(struct drm_device *dev)
+{
+	return dev->pdev->irq;
+}
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
 {
@@ -867,6 +1013,11 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+				       struct page **pages,
+				       unsigned long num_pages,
+				       uint32_t gtt_offset,
+				       uint32_t type);
 extern int drm_unbind_agp(DRM_AGP_MEM * handle);
 
 				/* Misc. IOCTL support (drm_ioctl.h) */
@@ -929,6 +1080,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
 extern int drm_authmagic(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 
+/* Cache management (drm_cache.c) */
+void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+
 				/* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv);
@@ -985,15 +1139,25 @@ extern void drm_core_reclaim_buffers(struct drm_device *dev,
 extern int drm_control(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_install(struct drm_device *dev);
 extern int drm_irq_uninstall(struct drm_device *dev);
 extern void drm_driver_irq_preinstall(struct drm_device *dev);
 extern void drm_driver_irq_postinstall(struct drm_device *dev);
 extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
-			   struct drm_file *file_priv);
+			   struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev,
+			       void(*func)(struct drm_device *));
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+/* Modesetting support */
+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
 
 				/* AGP/GART support (drm_agpsupport.h) */
@@ -1026,6 +1190,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
 
 				/* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
@@ -1088,6 +1253,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
+/* Graphics Execution Manager library functions (drm_gem.c) */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_object_free(struct kref *kref);
+struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
+					    size_t size);
+void drm_gem_object_handle_free(struct kref *kref);
+
+static inline void
+drm_gem_object_reference(struct drm_gem_object *obj)
+{
+	kref_get(&obj->refcount);
+}
+
+static inline void
+drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	kref_put(&obj->refcount, drm_gem_object_free);
+}
+
+int drm_gem_handle_create(struct drm_file *file_priv,
+			  struct drm_gem_object *obj,
+			  int *handlep);
+
+static inline void
+drm_gem_object_handle_reference(struct drm_gem_object *obj)
+{
+	drm_gem_object_reference(obj);
+	kref_get(&obj->handlecount);
+}
+
+static inline void
+drm_gem_object_handle_unreference(struct drm_gem_object *obj)
+{
+	if (obj == NULL)
+		return;
+
+	/*
+	 * Must drop the handle count first: releasing the last handle
+	 * also releases the object's global name, and that check needs
+	 * the object to still be alive (we hold a reference here).
+	 */
+	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	drm_gem_object_unreference(obj);
+}
+
+struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
+					     struct drm_file *filp,
+					     int handle);
+int drm_gem_close_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+int drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
+void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
+
 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
 extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
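The two krefs above encode the GEM ownership rules: each userspace handle pins one object reference, and the global (flink) name lives only as long as some handle does. A sketch of the create-time flow a driver ioctl is expected to follow with these helpers (the args struct is hypothetical):

struct drm_gem_object *obj;
int ret, handle;

obj = drm_gem_object_alloc(dev, args->size);	/* refcount == 1 */
if (obj == NULL)
	return -ENOMEM;

ret = drm_gem_handle_create(file_priv, obj, &handle);
drm_gem_object_unreference(obj);	/* the handle keeps its own ref */
if (ret)
	return ret;

args->handle = handle;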
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 135bd19499fc..da04109741e8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -84,18 +84,18 @@
 	{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -113,8 +113,10 @@
 	{0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
 	{0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
 	{0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
-	{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
-	{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
 	{0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
@@ -122,16 +124,16 @@
 	{0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
-	{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
-	{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
@@ -237,6 +239,10 @@
 	{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 	{0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
+	{0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
 	{0, 0, 0}
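
For orientation (not part of the patch): each row initializes a struct pci_device_id in field order (vendor, device, subvendor, subdevice, class, class_mask, driver_data), so the CHIP_* and RADEON_* flags travel in driver_data and reach the driver's load hook. A sketch of the assumed consumer, modeled on radeon_drv.c:

	#include <linux/pci.h>
	#include "drm_pciids.h"

	/* driver_data carries CHIP_RS400, CHIP_RS740, etc. */
	static struct pci_device_id pciidlist[] = {
		radeon_PCI_IDS
	};
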
 
 #define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 05c66cf03a9e..eb4b35031a55 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_GET_VBLANK_PIPE	0x0e
 #define DRM_I915_VBLANK_SWAP	0x0f
 #define DRM_I915_HWS_ADDR	0x11
+#define DRM_I915_GEM_INIT	0x13
+#define DRM_I915_GEM_EXECBUFFER	0x14
+#define DRM_I915_GEM_PIN	0x15
+#define DRM_I915_GEM_UNPIN	0x16
+#define DRM_I915_GEM_BUSY	0x17
+#define DRM_I915_GEM_THROTTLE	0x18
+#define DRM_I915_GEM_ENTERVT	0x19
+#define DRM_I915_GEM_LEAVEVT	0x1a
+#define DRM_I915_GEM_CREATE	0x1b
+#define DRM_I915_GEM_PREAD	0x1c
+#define DRM_I915_GEM_PWRITE	0x1d
+#define DRM_I915_GEM_MMAP	0x1e
+#define DRM_I915_GEM_SET_DOMAIN	0x1f
+#define DRM_I915_GEM_SW_FINISH	0x20
+#define DRM_I915_GEM_SET_TILING	0x21
+#define DRM_I915_GEM_GET_TILING	0x22
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
+#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
+#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
+#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -200,6 +230,8 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_IRQ_ACTIVE            1
 #define I915_PARAM_ALLOW_BATCHBUFFER     2
 #define I915_PARAM_LAST_DISPATCH         3
+#define I915_PARAM_CHIPSET_ID            4
+#define I915_PARAM_HAS_GEM               5
 
 typedef struct drm_i915_getparam {
 	int param;
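
Userspace is expected to probe for GEM before using any of the new ioctls. A minimal sketch, assuming libdrm's copy of this header and the pre-existing DRM_IOCTL_I915_GETPARAM request:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int i915_has_gem(int fd)
	{
		drm_i915_getparam_t gp;
		int has_gem = 0;

		gp.param = I915_PARAM_HAS_GEM;
		gp.value = &has_gem;
		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
			return 0;	/* old kernel: parameter unknown */
		return has_gem;
	}
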
@@ -267,4 +299,305 @@ typedef struct drm_i915_hws_addr {
 	uint64_t addr;
 } drm_i915_hws_addr_t;
 
+struct drm_i915_gem_init {
+	/**
+	 * Beginning offset in the GTT to be managed by the DRM memory
+	 * manager.
+	 */
+	uint64_t gtt_start;
+	/**
+	 * Ending offset in the GTT to be managed by the DRM memory
+	 * manager.
+	 */
+	uint64_t gtt_end;
+};
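
On the driver side the [gtt_start, gtt_end) range seeds the drm_mm allocator that hands out aperture space; a hedged sketch of the handler body, modeled on i915_gem_init_ioctl in this merge (locking and validation omitted):

	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
		    args->gtt_end - args->gtt_start);
	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
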
+
+struct drm_i915_gem_create {
+	/**
+	 * Requested size for the object.
+	 *
+	 * The (page-aligned) allocated size for the object will be returned.
+	 */
+	uint64_t size;
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	uint32_t handle;
+	uint32_t pad;
+};
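
A minimal userspace sketch of object creation (assumes an open DRM device such as /dev/dri/card0 and libdrm's copy of this header):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static uint32_t gem_create(int fd, uint64_t size)
	{
		struct drm_i915_gem_create create;

		memset(&create, 0, sizeof(create));
		create.size = size;	/* rounded up to pages by the kernel */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
			return 0;	/* handles are nonzero, so 0 means failure */
		return create.handle;
	}
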
+
+struct drm_i915_gem_pread {
+	/** Handle for the object being read. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to read from */
+	uint64_t offset;
+	/** Length of data to read */
+	uint64_t size;
+	/**
+	 * Pointer to write the data into.
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+	/** Handle for the object being written to. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset into the object to write to */
+	uint64_t offset;
+	/** Length of data to write */
+	uint64_t size;
+	/**
+	 * Pointer to read the data from.
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t data_ptr;
+};
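
pread and pwrite copy data through the object without a CPU mapping; a sketch of a write, under the same assumptions as the create example:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_write(int fd, uint32_t handle, uint64_t offset,
			     const void *data, uint64_t size)
	{
		struct drm_i915_gem_pwrite pwrite;

		memset(&pwrite, 0, sizeof(pwrite));
		pwrite.handle = handle;
		pwrite.offset = offset;
		pwrite.size = size;
		/* 64-bit field so 32- and 64-bit userspace lay out alike */
		pwrite.data_ptr = (uint64_t)(uintptr_t)data;
		return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
	}
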
+
+struct drm_i915_gem_mmap {
+	/** Handle for the object being mapped. */
+	uint32_t handle;
+	uint32_t pad;
+	/** Offset in the object to map. */
+	uint64_t offset;
+	/**
+	 * Length of data to map.
+	 *
+	 * The value will be page-aligned.
+	 */
+	uint64_t size;
+	/**
+	 * Returned pointer to where the data was mapped.
+	 *
+	 * This is a fixed-size type for 32/64 compatibility.
+	 */
+	uint64_t addr_ptr;
+};
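
Unlike mmap(2) on the DRM fd, this ioctl returns the CPU address directly in addr_ptr; a sketch:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static void *gem_mmap(int fd, uint32_t handle, uint64_t size)
	{
		struct drm_i915_gem_mmap arg;

		memset(&arg, 0, sizeof(arg));
		arg.handle = handle;
		arg.offset = 0;
		arg.size = size;	/* will be page-aligned */
		if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) != 0)
			return NULL;
		return (void *)(uintptr_t)arg.addr_ptr;
	}
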
+
+struct drm_i915_gem_set_domain {
+	/** Handle for the object */
+	uint32_t handle;
+
+	/** New read domains */
+	uint32_t read_domains;
+
+	/** New write domain */
+	uint32_t write_domain;
+};
+
+struct drm_i915_gem_sw_finish {
+	/** Handle for the object */
+	uint32_t handle;
+};
+
+struct drm_i915_gem_relocation_entry {
+	/**
+	 * Handle of the buffer being pointed to by this relocation entry.
+	 *
+	 * It's tempting to make this an index into the validate-entry
+	 * list instead, but a handle lets the driver build a relocation
+	 * list for state buffers once and reuse it across execs, rather
+	 * than rewriting it for each exec that uses the buffer.
+	 */
+	uint32_t target_handle;
+
+	/**
+	 * Value to be added to the offset of the target buffer to make up
+	 * the relocation entry.
+	 */
+	uint32_t delta;
+
+	/** Offset in the buffer the relocation entry will be written into */
+	uint64_t offset;
+
+	/**
+	 * Offset value of the target buffer that the relocation entry was last
+	 * written as.
+	 *
+	 * If the buffer has the same offset as last time, we can skip syncing
+	 * and writing the relocation.  This value is written back out by
+	 * the execbuffer ioctl when the relocation is written.
+	 */
+	uint64_t presumed_offset;
+
+	/**
+	 * Target memory domains read by this operation.
+	 */
+	uint32_t read_domains;
+
+	/**
+	 * Target memory domains written by this operation.
+	 *
+	 * Note that only one domain may be written by the whole
+	 * execbuffer operation; if the write domains conflict, the
+	 * application will get -EINVAL back.
+	 */
+	uint32_t write_domain;
+};
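
An illustrative entry (the target handle is hypothetical): it asks the kernel to write the target buffer's final offset, plus delta, at byte 16 of the buffer being validated, and to skip the write when presumed_offset already matches:

	struct drm_i915_gem_relocation_entry reloc;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = target;	/* hypothetical handle of the pointee */
	reloc.offset = 16;		/* byte in this buffer to patch */
	reloc.delta = 0;		/* added to the target's offset */
	reloc.presumed_offset = 0;	/* no guess on first submission */
	reloc.read_domains = I915_GEM_DOMAIN_RENDER;
	reloc.write_domain = 0;
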
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU		0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER		0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER		0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND		0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX		0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT		0x00000040
+/** @} */
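
Tying the domain flags to the set_domain and sw_finish ioctls above, CPU access to a mapped object looks roughly like this (fd and handle assumed to exist):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static void cpu_write_window(int fd, uint32_t handle)
	{
		struct drm_i915_gem_set_domain sd;
		struct drm_i915_gem_sw_finish fin;

		memset(&sd, 0, sizeof(sd));
		sd.handle = handle;
		sd.read_domains = I915_GEM_DOMAIN_CPU;
		sd.write_domain = I915_GEM_DOMAIN_CPU;
		ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);

		/* ... CPU reads and writes through the GEM mmap ... */

		memset(&fin, 0, sizeof(fin));
		fin.handle = handle;
		ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
	}
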
+
+struct drm_i915_gem_exec_object {
+	/**
+	 * User's handle for a buffer to be bound into the GTT for this
+	 * operation.
+	 */
+	uint32_t handle;
+
+	/** Number of relocations to be performed on this buffer */
+	uint32_t relocation_count;
+	/**
+	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
+	 * the relocations to be performed in this buffer.
+	 */
+	uint64_t relocs_ptr;
+
+	/** Required alignment in graphics aperture */
+	uint64_t alignment;
+
+	/**
+	 * Returned value of the updated offset of the object, for future
+	 * presumed_offset writes.
+	 */
+	uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+	/**
+	 * List of buffers to be validated, with their relocations to be
+	 * performed on them.
+	 *
+	 * This is a pointer to an array of struct drm_i915_gem_exec_object.
+	 *
+	 * These buffers must be listed in an order such that all relocations
+	 * a buffer performs refer to buffers that have already appeared
+	 * in the validate list.
+	 */
+	uint64_t buffers_ptr;
+	uint32_t buffer_count;
+
+	/** Offset in the batchbuffer to start execution from. */
+	uint32_t batch_start_offset;
+	/** Bytes used in batchbuffer from batch_start_offset */
+	uint32_t batch_len;
+	uint32_t DR1;
+	uint32_t DR4;
+	uint32_t num_cliprects;
+	/** This is a struct drm_clip_rect *cliprects */
+	uint64_t cliprects_ptr;
+};
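
A sketch of a single-buffer submission using the three structures above. This hunk defines no DRM_IOCTL_I915_GEM_EXECBUFFER wrapper, so the request code is composed inline here (an assumption); in the i915 implementation the last entry in buffers_ptr is treated as the batch buffer:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_exec(int fd, uint32_t batch_handle, uint32_t batch_len,
			    struct drm_i915_gem_relocation_entry *relocs,
			    uint32_t nreloc)
	{
		struct drm_i915_gem_exec_object exec;
		struct drm_i915_gem_execbuffer execbuf;

		memset(&exec, 0, sizeof(exec));
		exec.handle = batch_handle;	/* last entry: the batch itself */
		exec.relocation_count = nreloc;
		exec.relocs_ptr = (uint64_t)(uintptr_t)relocs;

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uint64_t)(uintptr_t)&exec;
		execbuf.buffer_count = 1;
		execbuf.batch_start_offset = 0;
		execbuf.batch_len = batch_len;

		return ioctl(fd, DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER,
					 struct drm_i915_gem_execbuffer), &execbuf);
	}
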
+
+struct drm_i915_gem_pin {
+	/** Handle of the buffer to be pinned. */
+	uint32_t handle;
+	uint32_t pad;
+
+	/** alignment required within the aperture */
+	uint64_t alignment;
+
+	/** Returned GTT offset of the buffer. */
+	uint64_t offset;
+};
+
+struct drm_i915_gem_unpin {
+	/** Handle of the buffer to be unpinned. */
+	uint32_t handle;
+	uint32_t pad;
+};
+
+struct drm_i915_gem_busy {
+	/** Handle of the buffer to check for busy */
+	uint32_t handle;
+
+	/** Return busy status (1 if busy, 0 if idle) */
+	uint32_t busy;
+};
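
A sketch of non-blocking completion polling:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_idle(int fd, uint32_t handle)
	{
		struct drm_i915_gem_busy busy;

		memset(&busy, 0, sizeof(busy));
		busy.handle = handle;
		if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
			return 0;
		return !busy.busy;	/* 1 when the GPU is done with it */
	}
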
+
+#define I915_TILING_NONE	0
+#define I915_TILING_X		1
+#define I915_TILING_Y		2
+
+#define I915_BIT_6_SWIZZLE_NONE		0
+#define I915_BIT_6_SWIZZLE_9		1
+#define I915_BIT_6_SWIZZLE_9_10		2
+#define I915_BIT_6_SWIZZLE_9_11		3
+#define I915_BIT_6_SWIZZLE_9_10_11	4
+/* Not seen by userland */
+#define I915_BIT_6_SWIZZLE_UNKNOWN	5
+
+struct drm_i915_gem_set_tiling {
+	/** Handle of the buffer to have its tiling state updated */
+	uint32_t handle;
+
+	/**
+	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+	 * I915_TILING_Y).
+	 *
+	 * This value is to be set on request, and will be updated by the
+	 * kernel on successful return with the actual chosen tiling layout.
+	 *
+	 * The tiling mode may be demoted to I915_TILING_NONE when the system
+	 * has bit 6 swizzling that can't be managed correctly by GEM.
+	 *
+	 * Buffer contents become undefined when changing tiling_mode.
+	 */
+	uint32_t tiling_mode;
+
+	/**
+	 * Stride in bytes for the object when in I915_TILING_X or
+	 * I915_TILING_Y.
+	 */
+	uint32_t stride;
+
+	/**
+	 * Returned address bit 6 swizzling required for CPU access through
+	 * mmap mapping.
+	 */
+	uint32_t swizzle_mode;
+};
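
A sketch of requesting X tiling; as documented above, the kernel may demote the mode and reports the swizzle userspace must apply for CPU access:

	#include <errno.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int gem_set_tiling_x(int fd, uint32_t handle, uint32_t stride)
	{
		struct drm_i915_gem_set_tiling st;
		int ret;

		memset(&st, 0, sizeof(st));
		st.handle = handle;
		st.tiling_mode = I915_TILING_X;
		st.stride = stride;	/* bytes per tiled row */
		do {
			ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
		} while (ret == -1 && errno == EINTR);
		/* st.tiling_mode and st.swizzle_mode hold what was chosen */
		return ret;
	}
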
+
+struct drm_i915_gem_get_tiling {
+	/** Handle of the buffer to get tiling state for. */
+	uint32_t handle;
+
+	/**
+	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+	 * I915_TILING_Y).
+	 */
+	uint32_t tiling_mode;
+
+	/**
+	 * Returned address bit 6 swizzling required for CPU access through
+	 * mmap mapping.
+	 */
+	uint32_t swizzle_mode;
+};
+
 #endif				/* _I915_DRM_H_ */
diff --git a/mm/shmem.c b/mm/shmem.c
index bf66d0191baf..d87958a5f03e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2580,6 +2580,7 @@ put_memory:
 	shmem_unacct_size(flags, size);
 	return ERR_PTR(error);
 }
+EXPORT_SYMBOL_GPL(shmem_file_setup);
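
The export exists for the new GEM core; a hedged sketch of the consumer, modeled on drm_gem_object_alloc() in this merge, which backs every GEM object with a shmem file so its pages remain swappable:

	obj->filp = shmem_file_setup("drm mm object", size, 0);
	if (IS_ERR(obj->filp)) {
		kfree(obj);
		return NULL;
	}
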
 
 /**
  * shmem_zero_setup - setup a shared anonymous mapping