Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Makefile        10
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c     858
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c     605
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h    1142
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c   222
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c     623
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c     386
7 files changed, 3846 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 000000000000..a9e60464df74
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+
+obj-$(CONFIG_DRM_I915)  += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
new file mode 100644
index 000000000000..88974342933c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -0,0 +1,858 @@
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* Really want an OS-independent resettable timer.  Would like to have
+ * this loop run for (eg) 3 sec, but have the timer reset every time
+ * the head pointer changes, so that EBUSY only happens if the ring
+ * actually stalls for (eg) 3 seconds.
+ */
+int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	int i;
+
+	for (i = 0; i < 10000; i++) {
+		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->Size;
+		if (ring->space >= n)
+			return 0;
+
+		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+		if (ring->head != last_head)
+			i = 0;
+
+		last_head = ring->head;
+	}
+
+	return -EBUSY;
+}
+
+void i915_kernel_lost_context(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+
+	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+	ring->space = ring->head - (ring->tail + 8);
+	if (ring->space < 0)
+		ring->space += ring->Size;
+
+	if (ring->head == ring->tail)
+		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+}
+
+static int i915_dma_cleanup(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq)
+		drm_irq_uninstall(dev);
+
+	if (dev_priv->ring.virtual_start) {
+		drm_core_ioremapfree(&dev_priv->ring.map, dev);
+		dev_priv->ring.virtual_start = 0;
+		dev_priv->ring.map.handle = 0;
+		dev_priv->ring.map.size = 0;
+	}
+
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+		/* Need to rewrite hardware status page */
+		I915_WRITE(0x02080, 0x1ffff000);
+	}
+
+	if (dev_priv->status_gfx_addr) {
+		dev_priv->status_gfx_addr = 0;
+		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+		I915_WRITE(0x2080, 0x1ffff000);
+	}
+
+	return 0;
+}
+
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("can not find sarea!\n");
+		i915_dma_cleanup(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio_map) {
+		i915_dma_cleanup(dev);
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv = (drm_i915_sarea_t *)
+	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+
+	dev_priv->ring.Start = init->ring_start;
+	dev_priv->ring.End = init->ring_end;
+	dev_priv->ring.Size = init->ring_size;
+	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+	dev_priv->ring.map.offset = init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = 0;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		i915_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->cpp = init->cpp;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+	/* We are using separate values as placeholders for mechanisms for
+	 * private backbuffer/depthbuffer usage.
+	 */
+	dev_priv->use_mi_batchbuffer_start = 0;
+	if (IS_I965G(dev)) /* 965 doesn't support older method */
+		dev_priv->use_mi_batchbuffer_start = 1;
+
+	/* Allow hardware batchbuffers unless told otherwise.
+	 */
+	dev_priv->allow_batchbuffer = 1;
+
+	/* Program Hardware Status Page */
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->status_page_dmah =
+			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+		if (!dev_priv->status_page_dmah) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("Can not allocate hardware status page\n");
+			return -ENOMEM;
+		}
+		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+		I915_WRITE(0x02080, dev_priv->dma_status_page);
+	}
+	DRM_DEBUG("Enabled hardware status page\n");
+	return 0;
+}
+
+static int i915_dma_resume(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	if (!dev_priv->sarea) {
+		DRM_ERROR("can not find sarea!\n");
+		return -EINVAL;
+	}
+
+	if (!dev_priv->mmio_map) {
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+
+	if (dev_priv->ring.map.handle == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Program Hardware Status Page */
+	if (!dev_priv->hw_status_page) {
+		DRM_ERROR("Can not find hardware status page\n");
+		return -EINVAL;
+	}
+	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+	if (dev_priv->status_gfx_addr != 0)
+		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+	else
+		I915_WRITE(0x02080, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+
+	return 0;
+}
+
+static int i915_dma_init(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case I915_INIT_DMA:
+		retcode = i915_initialize(dev, init);
+		break;
+	case I915_CLEANUP_DMA:
+		retcode = i915_dma_cleanup(dev);
+		break;
+	case I915_RESUME_DMA:
+		retcode = i915_dma_resume(dev);
+		break;
+	default:
+		retcode = -EINVAL;
+		break;
+	}
+
+	return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction.  It's important to get the size right as
+ * that tells us where the next instruction to check is.  Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
+static int do_validate_cmd(int cmd)
+{
+	switch (((cmd >> 29) & 0x7)) {
+	case 0x0:
+		switch ((cmd >> 23) & 0x3f) {
+		case 0x0:
+			return 1;	/* MI_NOOP */
+		case 0x4:
+			return 1;	/* MI_FLUSH */
+		default:
+			return 0;	/* disallow everything else */
+		}
+		break;
+	case 0x1:
+		return 0;	/* reserved */
+	case 0x2:
+		return (cmd & 0xff) + 2;	/* 2d commands */
+	case 0x3:
+		if (((cmd >> 24) & 0x1f) <= 0x18)
+			return 1;
+
+		switch ((cmd >> 24) & 0x1f) {
+		case 0x1c:
+			return 1;
+		case 0x1d:
+			switch ((cmd >> 16) & 0xff) {
+			case 0x3:
+				return (cmd & 0x1f) + 2;
+			case 0x4:
+				return (cmd & 0xf) + 2;
+			default:
+				return (cmd & 0xffff) + 2;
+			}
+		case 0x1e:
+			if (cmd & (1 << 23))
+				return (cmd & 0xffff) + 1;
+			else
+				return 1;
+		case 0x1f:
+			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
+				return (cmd & 0x1ffff) + 2;
+			else if (cmd & (1 << 17))	/* indirect random */
+				if ((cmd & 0xffff) == 0)
+					return 0;	/* unknown length, too hard */
+				else
+					return (((cmd & 0xffff) + 1) / 2) + 1;
+			else
+				return 2;	/* indirect sequential */
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int validate_cmd(int cmd)
+{
+	int ret = do_validate_cmd(cmd);
+
+/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
+
+	return ret;
+}
+
+static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+
+	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+		return -EINVAL;
+
+	BEGIN_LP_RING((dwords+1)&~1);
+
+	for (i = 0; i < dwords;) {
+		int cmd, sz;
+
+		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+			return -EINVAL;
+
+		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+			return -EINVAL;
+
+		OUT_RING(cmd);
+
+		while (++i, --sz) {
+			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+							 sizeof(cmd))) {
+				return -EINVAL;
+			}
+			OUT_RING(cmd);
+		}
+	}
+
+	if (dwords & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+
+	return 0;
+}
+
+static int i915_emit_box(struct drm_device * dev,
+			 struct drm_clip_rect __user * boxes,
+			 int i, int DR1, int DR4)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect box;
+	RING_LOCALS;
+
+	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+		return -EFAULT;
+	}
+
+	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+		DRM_ERROR("Bad box %d,%d..%d,%d\n",
+			  box.x1, box.y1, box.x2, box.y2);
+		return -EINVAL;
+	}
+
+	if (IS_I965G(dev)) {
+		BEGIN_LP_RING(4);
+		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING(DR4);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(6);
+		OUT_RING(GFX_OP_DRAWRECT_INFO);
+		OUT_RING(DR1);
+		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING(DR4);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+
+	return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+static void i915_emit_breadcrumb(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+
+	if (dev_priv->counter > 0x7FFFFFFFUL)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
+	BEGIN_LP_RING(4);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(20);
+	OUT_RING(dev_priv->counter);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+}
+
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+				   drm_i915_cmdbuffer_t * cmd)
+{
+	int nbox = cmd->num_cliprects;
+	int i = 0, count, ret;
+
+	if (cmd->sz & 0x3) {
+		DRM_ERROR("alignment");
+		return -EINVAL;
+	}
+
+	i915_kernel_lost_context(dev);
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			ret = i915_emit_box(dev, cmd->cliprects, i,
+					    cmd->DR1, cmd->DR4);
+			if (ret)
+				return ret;
+		}
+
+		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+		if (ret)
+			return ret;
+	}
+
+	i915_emit_breadcrumb(dev);
+	return 0;
+}
+
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
+				     drm_i915_batchbuffer_t * batch)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect __user *boxes = batch->cliprects;
+	int nbox = batch->num_cliprects;
+	int i = 0, count;
+	RING_LOCALS;
+
+	if ((batch->start | batch->used) & 0x7) {
+		DRM_ERROR("alignment");
+		return -EINVAL;
+	}
+
+	i915_kernel_lost_context(dev);
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			int ret = i915_emit_box(dev, boxes, i,
+						batch->DR1, batch->DR4);
+			if (ret)
+				return ret;
+		}
+
+		if (dev_priv->use_mi_batchbuffer_start) {
+			BEGIN_LP_RING(2);
+			if (IS_I965G(dev)) {
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+				OUT_RING(batch->start);
+			} else {
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+			}
+			ADVANCE_LP_RING();
+		} else {
+			BEGIN_LP_RING(4);
+			OUT_RING(MI_BATCH_BUFFER);
+			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+			OUT_RING(batch->start + batch->used - 4);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+	}
+
+	i915_emit_breadcrumb(dev);
+
+	return 0;
+}
+
+static int i915_dispatch_flip(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+		  __func__,
+		  dev_priv->current_page,
+		  dev_priv->sarea_priv->pf_current_page);
+
+	i915_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(6);
+	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+	OUT_RING(0);
+	if (dev_priv->current_page == 0) {
+		OUT_RING(dev_priv->back_offset);
+		dev_priv->current_page = 1;
+	} else {
+		OUT_RING(dev_priv->front_offset);
+		dev_priv->current_page = 0;
+	}
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(2);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+
+	BEGIN_LP_RING(4);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(20);
+	OUT_RING(dev_priv->counter);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+	return 0;
+}
+
+static int i915_quiescent(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	i915_kernel_lost_context(dev);
+	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+}
+
+static int i915_flush_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return i915_quiescent(dev);
+}
+
+static int i915_batchbuffer(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i915_batchbuffer_t *batch = data;
+	int ret;
+
+	if (!dev_priv->allow_batchbuffer) {
+		DRM_ERROR("Batchbuffer ioctl disabled\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+		  batch->start, batch->used, batch->num_cliprects);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+						       batch->num_cliprects *
+						       sizeof(struct drm_clip_rect)))
+		return -EFAULT;
+
+	ret = i915_dispatch_batchbuffer(dev, batch);
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return ret;
+}
+
+static int i915_cmdbuffer(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i915_cmdbuffer_t *cmdbuf = data;
+	int ret;
+
+	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (cmdbuf->num_cliprects &&
+	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+				cmdbuf->num_cliprects *
+				sizeof(struct drm_clip_rect))) {
+		DRM_ERROR("Fault accessing cliprects\n");
+		return -EFAULT;
+	}
+
+	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	if (ret) {
+		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+		return ret;
+	}
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return 0;
+}
+
+static int i915_flip_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	DRM_DEBUG("%s\n", __func__);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return i915_dispatch_flip(dev);
+}
+
+static int i915_getparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I915_PARAM_IRQ_ACTIVE:
+		value = dev->irq ? 1 : 0;
+		break;
+	case I915_PARAM_ALLOW_BATCHBUFFER:
+		value = dev_priv->allow_batchbuffer ? 1 : 0;
+		break;
+	case I915_PARAM_LAST_DISPATCH:
+		value = READ_BREADCRUMB(dev_priv);
+		break;
+	default:
+		DRM_ERROR("Unknown parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("DRM_COPY_TO_USER failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int i915_setparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_setparam_t *param = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+		if (!IS_I965G(dev))
+			dev_priv->use_mi_batchbuffer_start = param->value;
+		break;
+	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+		dev_priv->tex_lru_log_granularity = param->value;
+		break;
+	case I915_SETPARAM_ALLOW_BATCHBUFFER:
+		dev_priv->allow_batchbuffer = param->value;
+		break;
+	default:
+		DRM_ERROR("unknown parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i915_set_status_page(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_hws_addr_t *hws = data;
+
+	if (!I915_NEED_GFX_HWS(dev))
+		return -EINVAL;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
+
+	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+
+	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
+	dev_priv->hws_map.size = 4*1024;
+	dev_priv->hws_map.type = 0;
+	dev_priv->hws_map.flags = 0;
+	dev_priv->hws_map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->hws_map, dev);
+	if (dev_priv->hws_map.handle == NULL) {
+		i915_dma_cleanup(dev);
+		dev_priv->status_gfx_addr = 0;
+		DRM_ERROR("can not ioremap virtual address for"
+				" G33 hw status page\n");
+		return -ENOMEM;
+	}
+	dev_priv->hw_status_page = dev_priv->hws_map.handle;
+
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
+			dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+	return 0;
+}
+
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long base, size;
+	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
+	/* i915 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv, 0, sizeof(drm_i915_private_t));
+
+	dev->dev_private = (void *)dev_priv;
+
+	/* Add register map (needed for suspend/resume) */
+	base = drm_get_resource_start(dev, mmio_bar);
+	size = drm_get_resource_len(dev, mmio_bar);
+
+	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+			 _DRM_KERNEL | _DRM_DRIVER,
+			 &dev_priv->mmio_map);
+	return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->mmio_map)
+		drm_rmmap(dev, dev_priv->mmio_map);
+
+	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+		 DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+void i915_driver_lastclose(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	if (dev_priv->agp_heap)
+		i915_mem_takedown(&(dev_priv->agp_heap));
+
+	i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+}
+
+struct drm_ioctl_desc i915_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
+	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+};
+
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate every i9x5 is AGP.
+ */
+int i915_driver_device_is_agp(struct drm_device * dev)
+{
+	return 1;
+}
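The command-buffer path above (i915_dispatch_cmdbuffer() into i915_emit_cmds()) relies on do_validate_cmd() returning each permitted instruction's length in dwords, with 0 signalling an illegal opcode that aborts the whole buffer. Below is a minimal stand-alone sketch of that size-walk; it is not driver code, and validate_cmd() here is a hypothetical stand-in assumed to follow the same return convention:

/* Sketch only: walks a user command buffer the same way i915_emit_cmds()
 * does, assuming validate_cmd() returns the instruction length in dwords
 * (0 means "disallowed, abort the buffer").
 */
#include <stddef.h>

extern int validate_cmd(int cmd);	/* hypothetical stand-in */

static int check_cmd_buffer(const int *buf, size_t dwords)
{
	size_t i = 0;

	while (i < dwords) {
		int sz = validate_cmd(buf[i]);

		if (sz == 0 || i + sz > dwords)
			return -1;	/* illegal opcode, or instruction runs past the buffer */
		i += sz;		/* skip the opcode plus its payload dwords */
	}
	return 0;			/* every dword belongs to an allowed instruction */
}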
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
new file mode 100644
index 000000000000..93aed1c38bd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -0,0 +1,605 @@
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	i915_PCI_IDS
+};
+
+enum pipe {
+    PIPE_A = 0,
+    PIPE_B,
+};
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (pipe == PIPE_A)
+		return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+	else
+		return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for(i = 0; i < 256; i++)
+		array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for(i = 0; i < 256; i++)
+		I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
+{
+	outb(reg, index_port);
+	return inb(data_port);
+}
+
+static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
+{
+	inb(st01);
+	outb(palette_enable | reg, VGA_AR_INDEX);
+	return inb(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+	inb(st01);
+	outb(palette_enable | reg, VGA_AR_INDEX);
+	outb(val, VGA_AR_DATA_WRITE);
+}
+
+static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+	outb(reg, index_port);
+	outb(val, data_port);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* VGA color palette registers */
+	dev_priv->saveDACMASK = inb(VGA_DACMASK);
+	/* DACCRX automatically increments during read */
+	outb(0, VGA_DACRX);
+	/* Read 3 bytes of color data from each index */
+	for (i = 0; i < 256 * 3; i++)
+		dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
+
+	/* MSR bits */
+	dev_priv->saveMSR = inb(VGA_MSR_READ);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* CRT controller regs */
+	i915_write_indexed(cr_index, cr_data, 0x11,
+			   i915_read_indexed(cr_index, cr_data, 0x11) &
+			   (~0x80));
+	for (i = 0; i <= 0x24; i++)
+		dev_priv->saveCR[i] =
+			i915_read_indexed(cr_index, cr_data, i);
+	/* Make sure we don't turn off CR group 0 writes */
+	dev_priv->saveCR[0x11] &= ~0x80;
+
+	/* Attribute controller registers */
+	inb(st01);
+	dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
+	for (i = 0; i <= 0x14; i++)
+		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
+	inb(st01);
+	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+	inb(st01);
+
+	/* Graphics controller registers */
+	for (i = 0; i < 9; i++)
+		dev_priv->saveGR[i] =
+			i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
+
+	dev_priv->saveGR[0x10] =
+		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+	dev_priv->saveGR[0x11] =
+		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+	dev_priv->saveGR[0x18] =
+		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+	/* Sequencer registers */
+	for (i = 0; i < 8; i++)
+		dev_priv->saveSR[i] =
+			i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* MSR bits */
+	outb(dev_priv->saveMSR, VGA_MSR_WRITE);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* Sequencer registers, don't write SR07 */
+	for (i = 0; i < 7; i++)
+		i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
+				   dev_priv->saveSR[i]);
+
+	/* CRT controller regs */
+	/* Enable CR group 0 writes */
+	i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+	for (i = 0; i <= 0x24; i++)
+		i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+	/* Graphics controller regs */
+	for (i = 0; i < 9; i++)
+		i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
+				   dev_priv->saveGR[i]);
+
+	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+			   dev_priv->saveGR[0x10]);
+	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+			   dev_priv->saveGR[0x11]);
+	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+			   dev_priv->saveGR[0x18]);
+
+	/* Attribute controller registers */
+	inb(st01);
+	for (i = 0; i <= 0x14; i++)
+		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
+	inb(st01); /* switch back to index mode */
+	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+	inb(st01);
+
+	/* VGA color palette registers */
+	outb(dev_priv->saveDACMASK, VGA_DACMASK);
+	/* DACWX automatically increments during write */
+	outb(0, VGA_DACWX);
+	/* Write 3 bytes of color data to each index */
+	for (i = 0; i < 256 * 3; i++)
+		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
+
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dev || !dev_priv) {
+		printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
+		printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	pci_save_state(dev->pdev);
+	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+	/* Display arbitration control */
+	dev_priv->saveDSPARB = I915_READ(DSPARB);
+
+	/* Pipe & plane A info */
+	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+	dev_priv->saveFPA0 = I915_READ(FPA0);
+	dev_priv->saveFPA1 = I915_READ(FPA1);
+	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+	dev_priv->saveDSPABASE = I915_READ(DSPABASE);
+	if (IS_I965G(dev)) {
+		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+	}
+	i915_save_palette(dev, PIPE_A);
+	dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
+
+	/* Pipe & plane B info */
+	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+	dev_priv->saveFPB0 = I915_READ(FPB0);
+	dev_priv->saveFPB1 = I915_READ(FPB1);
+	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+	dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
+	if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
+		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+	}
+	i915_save_palette(dev, PIPE_B);
+	dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
+
+	/* CRT state */
+	dev_priv->saveADPA = I915_READ(ADPA);
+
+	/* LVDS state */
+	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+	if (IS_I965G(dev))
+		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		dev_priv->saveLVDS = I915_READ(LVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+	dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
+	dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
+	dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
+
+	/* FIXME: save TV & SDVO state */
+
+	/* FBC state */
+	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+	/* Interrupt state */
+	dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
+	dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
+	dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
+
+	/* VGA state */
+	dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
+	dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
+	dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
+	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+	/* Clock gating state */
+	dev_priv->saveD_STATE = I915_READ(D_STATE);
+	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
+
+	/* Cache mode state */
+	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+	/* Memory Arbitration state */
+	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+	/* Scratch space */
+	for (i = 0; i < 16; i++) {
+		dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
+		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+	}
+	for (i = 0; i < 3; i++)
+		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+	i915_save_vga(dev);
+
+	if (state.event == PM_EVENT_SUSPEND) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_set_power_state(dev->pdev, PCI_D0);
+	pci_restore_state(dev->pdev);
+	if (pci_enable_device(dev->pdev))
+		return -1;
+	pci_set_master(dev->pdev);
+
+	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
+	/* Pipe & plane A info */
+	/* Prime the clock */
+	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+			   ~DPLL_VCO_ENABLE);
+		udelay(150);
+	}
+	I915_WRITE(FPA0, dev_priv->saveFPA0);
+	I915_WRITE(FPA1, dev_priv->saveFPA1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+	udelay(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+	udelay(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+	/* Restore plane info */
+	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+	I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
+	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+	}
+
+	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+	i915_restore_palette(dev, PIPE_A);
+	/* Enable the plane */
+	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(DSPABASE, I915_READ(DSPABASE));
+
+	/* Pipe & plane B info */
+	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+			   ~DPLL_VCO_ENABLE);
+		udelay(150);
+	}
+	I915_WRITE(FPB0, dev_priv->saveFPB0);
+	I915_WRITE(FPB1, dev_priv->saveFPB1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+	udelay(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+	udelay(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+	/* Restore plane info */
+	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+	I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
+	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+	}
+
+	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+	i915_restore_palette(dev, PIPE_B);
+	/* Enable the plane */
+	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
+
+	/* CRT state */
+	I915_WRITE(ADPA, dev_priv->saveADPA);
+
+	/* LVDS state */
+	if (IS_I965G(dev))
+		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->saveLVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+	I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
+	I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
+	I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+	/* FIXME: restore TV & SDVO state */
+
+	/* FBC info */
+	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+	/* VGA state */
+	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+	I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
+	I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
+	I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
+	udelay(150);
+
+	/* Clock gating state */
+	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
+	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+
+	/* Cache mode state */
+	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+	/* Memory arbitration state */
+	I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
+	for (i = 0; i < 16; i++) {
+		I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
+		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+	}
+	for (i = 0; i < 3; i++)
+		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+	i915_restore_vga(dev);
+
+	return 0;
+}
+
+static struct drm_driver driver = {
+	/* don't use mtrr's here, the Xserver or user space app should
+	 * deal with them for intel hardware.
+	 */
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
+	    DRIVER_IRQ_VBL2,
+	.load = i915_driver_load,
+	.unload = i915_driver_unload,
+	.lastclose = i915_driver_lastclose,
+	.preclose = i915_driver_preclose,
+	.suspend = i915_suspend,
+	.resume = i915_resume,
+	.device_is_agp = i915_driver_device_is_agp,
+	.vblank_wait = i915_driver_vblank_wait,
+	.vblank_wait2 = i915_driver_vblank_wait2,
+	.irq_preinstall = i915_driver_irq_preinstall,
+	.irq_postinstall = i915_driver_irq_postinstall,
+	.irq_uninstall = i915_driver_irq_uninstall,
+	.irq_handler = i915_driver_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = i915_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+		 .compat_ioctl = i915_compat_ioctl,
+#endif
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init i915_init(void)
+{
+	driver.num_ioctls = i915_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit i915_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
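The VGA save/restore in i915_save_vga() and i915_restore_vga() above uses the standard indexed-register protocol: write a register number to the index port, then move the value through the matching data port (see i915_read_indexed() and i915_write_indexed()). A small sketch of the same access pattern, assuming the caller already holds x86 port-I/O rights (for example via ioperm() in user space):

/* Sketch of the index/data port access used by i915_save_vga(); assumes
 * port-I/O permissions are already granted (ioperm()/iopl()), otherwise
 * the inb()/outb() calls fault.
 */
#include <sys/io.h>

static unsigned char vga_read_indexed(unsigned short index_port,
				      unsigned short data_port,
				      unsigned char reg)
{
	outb(reg, index_port);	/* select the register */
	return inb(data_port);	/* then read its value */
}

/* Example: read CR11 (vertical retrace end) through the colour-mode ports,
 * the same 0x3d4/0x3d5 pair defined as VGA_CR_INDEX_CGA/VGA_CR_DATA_CGA.
 */
static unsigned char read_cr11(void)
{
	return vga_read_indexed(0x3d4, 0x3d5, 0x11);
}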
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
new file mode 100644
index 000000000000..d7326d92a237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -0,0 +1,1142 @@
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
+
+#define DRIVER_NAME		"i915"
+#define DRIVER_DESC		"Intel Graphics"
+#define DRIVER_DATE		"20060119"
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ *      - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		6
+#define DRIVER_PATCHLEVEL	0
+
+typedef struct _drm_i915_ring_buffer {
+	int tail_mask;
+	unsigned long Start;
+	unsigned long End;
+	unsigned long Size;
+	u8 *virtual_start;
+	int head;
+	int tail;
+	int space;
+	drm_local_map_t map;
+} drm_i915_ring_buffer_t;
+
+struct mem_block {
+	struct mem_block *next;
+	struct mem_block *prev;
+	int start;
+	int size;
+	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+typedef struct _drm_i915_vbl_swap {
+	struct list_head head;
+	drm_drawable_t drw_id;
+	unsigned int pipe;
+	unsigned int sequence;
+} drm_i915_vbl_swap_t;
+
+typedef struct drm_i915_private {
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio_map;
+
+	drm_i915_sarea_t *sarea_priv;
+	drm_i915_ring_buffer_t ring;
+
+	drm_dma_handle_t *status_page_dmah;
+	void *hw_status_page;
+	dma_addr_t dma_status_page;
+	unsigned long counter;
+	unsigned int status_gfx_addr;
+	drm_local_map_t hws_map;
+
+	unsigned int cpp;
+	int back_offset;
+	int front_offset;
+	int current_page;
+	int page_flipping;
+	int use_mi_batchbuffer_start;
+
+	wait_queue_head_t irq_queue;
+	atomic_t irq_received;
+	atomic_t irq_emitted;
+
+	int tex_lru_log_granularity;
+	int allow_batchbuffer;
+	struct mem_block *agp_heap;
+	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+	int vblank_pipe;
+
+	spinlock_t swaps_lock;
+	drm_i915_vbl_swap_t vbl_swaps;
+	unsigned int swaps_pending;
+
+	/* Register state */
+	u8 saveLBB;
+	u32 saveDSPACNTR;
+	u32 saveDSPBCNTR;
+	u32 saveDSPARB;
+	u32 savePIPEACONF;
+	u32 savePIPEBCONF;
+	u32 savePIPEASRC;
+	u32 savePIPEBSRC;
+	u32 saveFPA0;
+	u32 saveFPA1;
+	u32 saveDPLL_A;
+	u32 saveDPLL_A_MD;
+	u32 saveHTOTAL_A;
+	u32 saveHBLANK_A;
+	u32 saveHSYNC_A;
+	u32 saveVTOTAL_A;
+	u32 saveVBLANK_A;
+	u32 saveVSYNC_A;
+	u32 saveBCLRPAT_A;
+	u32 savePIPEASTAT;
+	u32 saveDSPASTRIDE;
+	u32 saveDSPASIZE;
+	u32 saveDSPAPOS;
+	u32 saveDSPABASE;
+	u32 saveDSPASURF;
+	u32 saveDSPATILEOFF;
+	u32 savePFIT_PGM_RATIOS;
+	u32 saveBLC_PWM_CTL;
+	u32 saveBLC_PWM_CTL2;
+	u32 saveFPB0;
+	u32 saveFPB1;
+	u32 saveDPLL_B;
+	u32 saveDPLL_B_MD;
+	u32 saveHTOTAL_B;
+	u32 saveHBLANK_B;
+	u32 saveHSYNC_B;
+	u32 saveVTOTAL_B;
+	u32 saveVBLANK_B;
+	u32 saveVSYNC_B;
+	u32 saveBCLRPAT_B;
+	u32 savePIPEBSTAT;
+	u32 saveDSPBSTRIDE;
+	u32 saveDSPBSIZE;
+	u32 saveDSPBPOS;
+	u32 saveDSPBBASE;
+	u32 saveDSPBSURF;
+	u32 saveDSPBTILEOFF;
+	u32 saveVCLK_DIVISOR_VGA0;
+	u32 saveVCLK_DIVISOR_VGA1;
+	u32 saveVCLK_POST_DIV;
+	u32 saveVGACNTRL;
+	u32 saveADPA;
+	u32 saveLVDS;
+	u32 saveLVDSPP_ON;
+	u32 saveLVDSPP_OFF;
+	u32 saveDVOA;
+	u32 saveDVOB;
+	u32 saveDVOC;
+	u32 savePP_ON;
+	u32 savePP_OFF;
+	u32 savePP_CONTROL;
+	u32 savePP_CYCLE;
+	u32 savePFIT_CONTROL;
+	u32 save_palette_a[256];
+	u32 save_palette_b[256];
+	u32 saveFBC_CFB_BASE;
+	u32 saveFBC_LL_BASE;
+	u32 saveFBC_CONTROL;
+	u32 saveFBC_CONTROL2;
+	u32 saveIER;
+	u32 saveIIR;
+	u32 saveIMR;
+	u32 saveCACHE_MODE_0;
+	u32 saveD_STATE;
+	u32 saveDSPCLK_GATE_D;
+	u32 saveMI_ARB_STATE;
+	u32 saveSWF0[16];
+	u32 saveSWF1[16];
+	u32 saveSWF2[3];
+	u8 saveMSR;
+	u8 saveSR[8];
+	u8 saveGR[25];
+	u8 saveAR_INDEX;
+	u8 saveAR[21];
+	u8 saveDACMASK;
+	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
+	u8 saveCR[37];
+} drm_i915_private_t;
+
+extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+
+				/* i915_dma.c */
+extern void i915_kernel_lost_context(struct drm_device * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+				 struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg);
+
+/* i915_irq.c */
+extern int i915_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int i915_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+
+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+extern void i915_driver_irq_preinstall(struct drm_device * dev);
+extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int i915_vblank_swap(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+
+/* i915_mem.c */
+extern int i915_mem_alloc(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+extern int i915_mem_free(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+extern void i915_mem_takedown(struct mem_block **heap);
+extern void i915_mem_release(struct drm_device * dev,
+			     struct drm_file *file_priv, struct mem_block *heap);
+
+#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+
+#define I915_VERBOSE 0
+
+#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
+                        volatile char *virt;
+
+#define BEGIN_LP_RING(n) do {				\
+	if (I915_VERBOSE)				\
+		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
+	if (dev_priv->ring.space < (n)*4)		\
+		i915_wait_ring(dev, (n)*4, __func__);		\
+	outcount = 0;					\
+	outring = dev_priv->ring.tail;			\
+	ringmask = dev_priv->ring.tail_mask;		\
+	virt = dev_priv->ring.virtual_start;		\
+} while (0)
+
+#define OUT_RING(n) do {					\
+	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
+	*(volatile unsigned int *)(virt + outring) = (n);	\
+        outcount++;						\
+	outring += 4;						\
+	outring &= ringmask;					\
+} while (0)
+
+#define ADVANCE_LP_RING() do {						\
+	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
+	dev_priv->ring.tail = outring;					\
+	dev_priv->ring.space -= outcount * 4;				\
+	I915_WRITE(LP_RING + RING_TAIL, outring);			\
+} while(0)
+
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+
+/* Extended config space */
+#define LBB 0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD			(7<<23)
+#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT   0x00000000
+#define INST_OP_FLUSH        0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+#define BB1_START_ADDR_MASK   (~0x7)
+#define BB1_PROTECTED         (1<<0)
+#define BB1_UNPROTECTED       (0<<0)
+#define BB2_END_ADDR_MASK     (~0x7)
+
+/* Framebuffer compression */
+#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
+#define FBC_LL_BASE		0x03204 /* 4k page aligned */
+#define FBC_CONTROL		0x03208
+#define   FBC_CTL_EN		(1<<31)
+#define   FBC_CTL_PERIODIC	(1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_STRIDE_SHIFT	(5)
+#define   FBC_CTL_FENCENO	(1<<0)
+#define FBC_COMMAND		0x0320c
+#define   FBC_CMD_COMPRESS	(1<<0)
+#define FBC_STATUS		0x03210
+#define   FBC_STAT_COMPRESSING	(1<<31)
+#define   FBC_STAT_COMPRESSED	(1<<30)
+#define   FBC_STAT_MODIFIED	(1<<29)
+#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define FBC_CONTROL2		0x03214
+#define   FBC_CTL_FENCE_DBL	(0<<4)
+#define   FBC_CTL_IDLE_IMM	(0<<2)
+#define   FBC_CTL_IDLE_FULL	(1<<2)
+#define   FBC_CTL_IDLE_LINE	(2<<2)
+#define   FBC_CTL_IDLE_DEBUG	(3<<2)
+#define   FBC_CTL_CPU_FENCE	(1<<1)
+#define   FBC_CTL_PLANEA	(0<<0)
+#define   FBC_CTL_PLANEB	(1<<0)
+#define FBC_FENCE_OFF		0x0321b
+
+#define FBC_LL_SIZE		(1536)
+#define FBC_LL_PAD		(32)
+
+/* Interrupt bits:
+ */
+#define USER_INT_FLAG    (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+#define HWB_OOM_FLAG     (1<<13) /* binner out of memory */
+
+#define I915REG_HWSTAM		0x02098
+#define I915REG_INT_IDENTITY_R	0x020a4
+#define I915REG_INT_MASK_R	0x020a8
+#define I915REG_INT_ENABLE_R	0x020a0
+
+#define I915REG_PIPEASTAT	0x70024
+#define I915REG_PIPEBSTAT	0x71024
+
+#define I915_VBLANK_INTERRUPT_ENABLE	(1UL<<17)
+#define I915_VBLANK_CLEAR		(1UL<<1)
+
+#define SRX_INDEX		0x3c4
+#define SRX_DATA		0x3c5
+#define SR01			1
+#define SR01_SCREEN_OFF		(1<<5)
+
+#define PPCR			0x61204
+#define PPCR_ON			(1<<0)
+
+#define DVOB			0x61140
+#define DVOB_ON			(1<<31)
+#define DVOC			0x61160
+#define DVOC_ON			(1<<31)
+#define LVDS			0x61180
+#define LVDS_ON			(1<<31)
+
+#define ADPA			0x61100
+#define ADPA_DPMS_MASK		(~(3<<10))
+#define ADPA_DPMS_ON		(0<<10)
+#define ADPA_DPMS_SUSPEND	(1<<10)
+#define ADPA_DPMS_STANDBY	(2<<10)
+#define ADPA_DPMS_OFF		(3<<10)
+
+#define NOPID                   0x2094
+#define LP_RING			0x2030
+#define HP_RING			0x2040
+/* The binner has its own ring buffer:
+ */
+#define HWB_RING		0x2400
+
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x001FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0xFFFFF000
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x001FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+/* Instruction parser error reg:
+ */
+#define IPEIR			0x2088
+
+/* Scratch pad debug 0 reg:
+ */
+#define SCPD0			0x209c
+
+/* Error status reg:
+ */
+#define ESR			0x20b8
+
+/* Secondary DMA fetch address debug reg:
+ */
+#define DMA_FADD_S		0x20d4
+
+/* Memory Interface Arbitration State
+ */
+#define MI_ARB_STATE		0x20e4
+
+/* Cache mode 0 reg.
+ *  - Manipulating render cache behaviour is central
+ *    to the concept of zone rendering; tuning this reg can help avoid
+ *    unnecessary render cache reads and even writes (for z/stencil)
+ *    at the beginning and end of a scene.
+ *
+ *  - To change a bit, write to this reg with the mask bit set and the
+ *    bit of interest either set or cleared.  E.g. (BIT<<16) | BIT to set.
+ */
+#define Cache_Mode_0		0x2120
+#define CACHE_MODE_0		0x2120
+#define CM0_MASK_SHIFT          16
+#define CM0_IZ_OPT_DISABLE      (1<<6)
+#define CM0_ZR_OPT_DISABLE      (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
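+
+/* Illustrative sketch (not part of this driver) of the masked-write scheme
+ * described above; CM0_IZ_OPT_DISABLE is just an example bit and dev_priv is
+ * assumed to be in scope for I915_WRITE():
+ *
+ *	// set: disable the IZ optimisation
+ *	I915_WRITE(CACHE_MODE_0,
+ *		   (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE);
+ *	// clear: re-enable it (mask bit set, value bit clear)
+ *	I915_WRITE(CACHE_MODE_0, CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT);
+ */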
+
+
+/* Graphics flush control.  A CPU write flushes the GWB of all writes.
+ * The data is discarded.
+ */
+#define GFX_FLSH_CNTL		0x2170
+
+/* Binner control.  Defines the location of the bin pointer list:
+ */
+#define BINCTL			0x2420
+#define BC_MASK			(1 << 9)
+
+/* Binned scene info.
+ */
+#define BINSCENE		0x2428
+#define BS_OP_LOAD		(1 << 8)
+#define BS_MASK			(1 << 22)
+
+/* Bin command parser debug reg:
+ */
+#define BCPD			0x2480
+
+/* Bin memory control debug reg:
+ */
+#define BMCD			0x2484
+
+/* Bin data cache debug reg:
+ */
+#define BDCD			0x2488
+
+/* Binner pointer cache debug reg:
+ */
+#define BPCD			0x248c
+
+/* Binner scratch pad debug reg:
+ */
+#define BINSKPD			0x24f0
+
+/* HWB scratch pad debug reg:
+ */
+#define HWBSKPD			0x24f4
+
+/* Binner memory pool reg:
+ */
+#define BMP_BUFFER		0x2430
+#define BMP_PAGE_SIZE_4K	(0 << 10)
+#define BMP_BUFFER_SIZE_SHIFT	1
+#define BMP_ENABLE		(1 << 0)
+
+/* Get/put memory from the binner memory pool:
+ */
+#define BMP_GET			0x2438
+#define BMP_PUT			0x2440
+#define BMP_OFFSET_SHIFT	5
+
+/* 3D state packets:
+ */
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR       (0x1<<1)
+#define SC_ENABLE_MASK          (0x1<<0)
+#define SC_ENABLE               (0x1<<0)
+
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK      (0xffff<<16)
+#define SCI_XMIN_MASK      (0xffff<<0)
+#define SCI_YMAX_MASK      (0xffff<<16)
+#define SCI_XMAX_MASK      (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15)
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11)
+
+#define MI_BATCH_BUFFER		((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START	(0x31<<23)
+#define MI_BATCH_BUFFER_END	(0xA<<23)
+#define MI_BATCH_NON_SECURE	(1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+
+#define MI_WAIT_FOR_EVENT       ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+
+#define MI_LOAD_SCAN_LINES_INCL  ((0x12<<23))
+
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP                (1<<22)
+#define DISPLAY_PLANE_A           (0<<20)
+#define DISPLAY_PLANE_B           (1<<20)
+
+/* Display regs */
+#define DSPACNTR                0x70180
+#define DSPBCNTR                0x71180
+#define DISPPLANE_SEL_PIPE_MASK                 (1<<24)
+
+/* Define the region of interest for the binner:
+ */
+#define CMD_OP_BIN_CONTROL	 ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
+
+#define CMD_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+#define CMD_MI_FLUSH         (0x04 << 23)
+#define MI_NO_WRITE_FLUSH    (1 << 2)
+#define MI_READ_FLUSH        (1 << 0)
+#define MI_EXE_FLUSH         (1 << 1)
+#define MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
+#define MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
+
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+
+#define BLC_PWM_CTL		0x61254
+#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+
+#define BLC_PWM_CTL2		0x61250
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
+#define BLM_LEGACY_MODE				(1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
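+
+/* Illustrative sketch (not part of this driver): program roughly a 50% duty
+ * cycle without touching the modulation frequency.  The frequency field is in
+ * units of two cycles, so half of (field * 2) is simply the field value;
+ * dev_priv is assumed to be in scope for I915_READ()/I915_WRITE():
+ *
+ *	u32 blc = I915_READ(BLC_PWM_CTL);
+ *	u32 freq = (blc & BACKLIGHT_MODULATION_FREQ_MASK) >>
+ *		   BACKLIGHT_MODULATION_FREQ_SHIFT;
+ *	blc = (blc & ~BACKLIGHT_DUTY_CYCLE_MASK) |
+ *	      (freq << BACKLIGHT_DUTY_CYCLE_SHIFT);
+ *	I915_WRITE(BLC_PWM_CTL, blc);
+ */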
+
+#define I915_GCFGC			0xf0
+#define I915_LOW_FREQUENCY_ENABLE		(1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ		(0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ		(4 << 4)
+#define I915_DISPLAY_CLOCK_MASK			(7 << 4)
+
+#define I855_HPLLCC			0xc0
+#define I855_CLOCK_CONTROL_MASK			(3 << 0)
+#define I855_CLOCK_133_200			(0 << 0)
+#define I855_CLOCK_100_200			(1 << 0)
+#define I855_CLOCK_100_133			(2 << 0)
+#define I855_CLOCK_166_250			(3 << 0)
+
+/* p317, 319
+ */
+#define VCLK2_VCO_M        0x6008 /* treat as 16 bit? (includes msbs) */
+#define VCLK2_VCO_N        0x600a
+#define VCLK2_VCO_DIV_SEL  0x6012
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV	    0x6010
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA1_PD_P2_DIV_4	(1 << 15)
+/** Overrides the p2 post divisor field */
+# define VGA1_PD_P1_DIV_2	(1 << 13)
+# define VGA1_PD_P1_SHIFT	8
+/** P1 value is 2 greater than this field */
+# define VGA1_PD_P1_MASK	(0x1f << 8)
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA0_PD_P2_DIV_4	(1 << 7)
+/** Overrides the p2 post divisor field */
+# define VGA0_PD_P1_DIV_2	(1 << 5)
+# define VGA0_PD_P1_SHIFT	0
+/** P1 value is 2 greater than this field */
+# define VGA0_PD_P1_MASK	(0x1f << 0)
+
+/* PCI D state control register */
+#define D_STATE		0x6104
+#define DSPCLK_GATE_D	0x6200
+
+/* I830 CRTC registers */
+#define HTOTAL_A	0x60000
+#define HBLANK_A	0x60004
+#define HSYNC_A		0x60008
+#define VTOTAL_A	0x6000c
+#define VBLANK_A	0x60010
+#define VSYNC_A		0x60014
+#define PIPEASRC	0x6001c
+#define BCLRPAT_A	0x60020
+#define VSYNCSHIFT_A	0x60028
+
+#define HTOTAL_B	0x61000
+#define HBLANK_B	0x61004
+#define HSYNC_B		0x61008
+#define VTOTAL_B	0x6100c
+#define VBLANK_B	0x61010
+#define VSYNC_B		0x61014
+#define PIPEBSRC	0x6101c
+#define BCLRPAT_B	0x61020
+#define VSYNCSHIFT_B	0x61028
+
+#define PP_STATUS	0x61200
+# define PP_ON					(1 << 31)
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+# define PP_READY				(1 << 30)
+# define PP_SEQUENCE_NONE			(0 << 28)
+# define PP_SEQUENCE_ON				(1 << 28)
+# define PP_SEQUENCE_OFF			(2 << 28)
+# define PP_SEQUENCE_MASK			0x30000000
+#define PP_CONTROL	0x61204
+# define POWER_TARGET_ON			(1 << 0)
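+
+/* Illustrative sketch (not part of this driver): turn the panel on and wait
+ * for the power sequencer to report it fully up; dev_priv is assumed to be
+ * in scope:
+ *
+ *	I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);
+ *	while ((I915_READ(PP_STATUS) & PP_ON) == 0)
+ *		cpu_relax();
+ */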
+
+#define LVDSPP_ON       0x61208
+#define LVDSPP_OFF      0x6120c
+#define PP_CYCLE        0x61210
+
+#define PFIT_CONTROL	0x61230
+# define PFIT_ENABLE				(1 << 31)
+# define PFIT_PIPE_MASK				(3 << 29)
+# define PFIT_PIPE_SHIFT			29
+# define VERT_INTERP_DISABLE			(0 << 10)
+# define VERT_INTERP_BILINEAR			(1 << 10)
+# define VERT_INTERP_MASK			(3 << 10)
+# define VERT_AUTO_SCALE			(1 << 9)
+# define HORIZ_INTERP_DISABLE			(0 << 6)
+# define HORIZ_INTERP_BILINEAR			(1 << 6)
+# define HORIZ_INTERP_MASK			(3 << 6)
+# define HORIZ_AUTO_SCALE			(1 << 5)
+# define PANEL_8TO6_DITHER_ENABLE		(1 << 3)
+
+#define PFIT_PGM_RATIOS	0x61234
+# define PFIT_VERT_SCALE_MASK			0xfff00000
+# define PFIT_HORIZ_SCALE_MASK			0x0000fff0
+
+#define PFIT_AUTO_RATIOS	0x61238
+
+
+#define DPLL_A		0x06014
+#define DPLL_B		0x06018
+# define DPLL_VCO_ENABLE			(1 << 31)
+# define DPLL_DVO_HIGH_SPEED			(1 << 30)
+# define DPLL_SYNCLOCK_ENABLE			(1 << 29)
+# define DPLL_VGA_MODE_DIS			(1 << 28)
+# define DPLLB_MODE_DAC_SERIAL			(1 << 26) /* i915 */
+# define DPLLB_MODE_LVDS			(2 << 26) /* i915 */
+# define DPLL_MODE_MASK				(3 << 26)
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24) /* i915 */
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5		(1 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_14		(0 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_7		(1 << 24) /* i915 */
+# define DPLL_P2_CLOCK_DIV_MASK			0x03000000 /* i915 */
+# define DPLL_FPA01_P1_POST_DIV_MASK		0x00ff0000 /* i915 */
+/**
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+# define DPLL_FPA01_P1_POST_DIV_SHIFT		16
+# define PLL_P2_DIVIDE_BY_4			(1 << 23) /* i830, required in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO			(1 << 21) /* i830 */
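+
+/* Illustrative sketch (hypothetical 'dpll' value, not from this driver):
+ * recover P1 on i830-class hardware in DAC/serial mode, where the field
+ * stores P1 - 2 unless PLL_P1_DIVIDE_BY_TWO forces P1 = 2:
+ *
+ *	u32 fp1 = (dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ *		  DPLL_FPA01_P1_POST_DIV_SHIFT;
+ *	int p1 = (dpll & PLL_P1_DIVIDE_BY_TWO) ? 2 : fp1 + 2;
+ */
+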
+# define PLL_REF_INPUT_DREFCLK			(0 << 13)
+# define PLL_REF_INPUT_TVCLKINA			(1 << 13) /* i830 */
+# define PLL_REF_INPUT_TVCLKINBC		(2 << 13) /* SDVO TVCLKIN */
+# define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
+# define PLL_REF_INPUT_MASK			(3 << 13)
+# define PLL_LOAD_PULSE_PHASE_SHIFT		9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+# define PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+# define DISPLAY_RATE_SELECT_FPA1		(1 << 8)
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+# define SDVO_MULTIPLIER_MASK			0x000000ff
+# define SDVO_MULTIPLIER_SHIFT_HIRES		4
+# define SDVO_MULTIPLIER_SHIFT_VGA		0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD		0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD		0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+# define DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+# define DPLL_MD_UDI_DIVIDER_SHIFT		24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+# define DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides
+ * of the link knowing how many of the bytes are fill.
+ *
+ * So, for a mode with a dot clock of 65 MHz, we would want to double the
+ * clock rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate
+ * would be set to 130 MHz, and the SDVO multiplier set to 2x in this register
+ * and through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+# define DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+# define DPLL_MD_UDI_MULTIPLIER_SHIFT		8
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best left at the default value (3) or the CRT won't work; what it
+ * does exactly is not entirely understood.
+ */
+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+/** @} */
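+
+/* Illustrative sketch (hypothetical 'dpll_md' value, not from this driver):
+ * following the worked example above, a 65 MHz dot clock needs a 2x SDVO
+ * multiplier to reach a legal bus rate, and the field holds multiplier - 1:
+ *
+ *	dpll_md &= ~DPLL_MD_UDI_MULTIPLIER_MASK;
+ *	dpll_md |= (2 - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ */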
+
+#define DPLL_TEST		0x606c
+# define DPLLB_TEST_SDVO_DIV_1			(0 << 22)
+# define DPLLB_TEST_SDVO_DIV_2			(1 << 22)
+# define DPLLB_TEST_SDVO_DIV_4			(2 << 22)
+# define DPLLB_TEST_SDVO_DIV_MASK		(3 << 22)
+# define DPLLB_TEST_N_BYPASS			(1 << 19)
+# define DPLLB_TEST_M_BYPASS			(1 << 18)
+# define DPLLB_INPUT_BUFFER_ENABLE		(1 << 16)
+# define DPLLA_TEST_N_BYPASS			(1 << 3)
+# define DPLLA_TEST_M_BYPASS			(1 << 2)
+# define DPLLA_INPUT_BUFFER_ENABLE		(1 << 0)
+
+#define ADPA			0x61100
+#define ADPA_DAC_ENABLE		(1<<31)
+#define ADPA_DAC_DISABLE	0
+#define ADPA_PIPE_SELECT_MASK	(1<<30)
+#define ADPA_PIPE_A_SELECT	0
+#define ADPA_PIPE_B_SELECT	(1<<30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY	0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE	0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE	0
+#define ADPA_VSYNC_ACTIVE_HIGH	(1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW	0
+#define ADPA_HSYNC_ACTIVE_HIGH	(1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW	0
+
+#define FPA0		0x06040
+#define FPA1		0x06044
+#define FPB0		0x06048
+#define FPB1		0x0604c
+# define FP_N_DIV_MASK				0x003f0000
+# define FP_N_DIV_SHIFT				16
+# define FP_M1_DIV_MASK				0x00003f00
+# define FP_M1_DIV_SHIFT			8
+# define FP_M2_DIV_MASK				0x0000003f
+# define FP_M2_DIV_SHIFT			0
+
+
+#define PORT_HOTPLUG_EN		0x61110
+# define SDVOB_HOTPLUG_INT_EN			(1 << 26)
+# define SDVOC_HOTPLUG_INT_EN			(1 << 25)
+# define TV_HOTPLUG_INT_EN			(1 << 18)
+# define CRT_HOTPLUG_INT_EN			(1 << 9)
+# define CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+
+#define PORT_HOTPLUG_STAT	0x61114
+# define CRT_HOTPLUG_INT_STATUS			(1 << 11)
+# define TV_HOTPLUG_INT_STATUS			(1 << 10)
+# define CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+# define CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+# define CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+# define CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+# define SDVOC_HOTPLUG_INT_STATUS		(1 << 7)
+# define SDVOB_HOTPLUG_INT_STATUS		(1 << 6)
+
+#define SDVOB			0x61140
+#define SDVOC			0x61160
+#define SDVO_ENABLE				(1 << 31)
+#define SDVO_PIPE_B_SELECT			(1 << 30)
+#define SDVO_STALL_SELECT			(1 << 29)
+#define SDVO_INTERRUPT_ENABLE			(1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK			(7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT		23
+#define SDVO_PHASE_SELECT_MASK			(15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT		(6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT		(1 << 18)
+#define SDVOC_GANG_MODE				(1 << 16)
+#define SDVO_BORDER_ENABLE			(1 << 7)
+#define SDVOB_PCIE_CONCURRENCY			(1 << 3)
+#define SDVO_DETECTED				(1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK			((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK			(1 << 17)
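+
+/* Illustrative sketch (not part of this driver): enable SDVOB while keeping
+ * the bits the hardware requires to be preserved across writes; dev_priv is
+ * assumed to be in scope:
+ *
+ *	u32 sdvob = I915_READ(SDVOB) & SDVOB_PRESERVE_MASK;
+ *	I915_WRITE(SDVOB, sdvob | SDVO_ENABLE);
+ */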
+
+/** @defgroup LVDS
+ * @{
+ */
+/**
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS			0x61180
+/**
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+# define LVDS_PORT_EN			(1 << 31)
+/** Selects pipe B for LVDS data.  Must be set on pre-965. */
+# define LVDS_PIPEB_SELECT		(1 << 30)
+
+/**
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+# define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+# define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+# define LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/**
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+# define LVDS_A3_POWER_MASK		(3 << 6)
+# define LVDS_A3_POWER_DOWN		(0 << 6)
+# define LVDS_A3_POWER_UP		(3 << 6)
+/**
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+# define LVDS_CLKB_POWER_MASK		(3 << 4)
+# define LVDS_CLKB_POWER_DOWN		(0 << 4)
+# define LVDS_CLKB_POWER_UP		(3 << 4)
+
+/**
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+# define LVDS_B0B3_POWER_MASK		(3 << 2)
+# define LVDS_B0B3_POWER_DOWN		(0 << 2)
+# define LVDS_B0B3_POWER_UP		(3 << 2)
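+
+/* Illustrative sketch (not part of this driver): enable LVDS on pipe B in
+ * single-channel 18bpp mode, powering only the A0-A2/CLKA pairs.  Per the
+ * note above, LVDS_PORT_EN must be set before the DPLL is enabled; dev_priv
+ * is assumed to be in scope:
+ *
+ *	I915_WRITE(LVDS, LVDS_PORT_EN | LVDS_PIPEB_SELECT |
+ *			 LVDS_A0A2_CLKA_POWER_UP);
+ */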
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE	(1<<31)
+#define PIPEACONF_DISABLE	0
+#define PIPEACONF_DOUBLE_WIDE	(1<<30)
+#define I965_PIPECONF_ACTIVE	(1<<30)
+#define PIPEACONF_SINGLE_WIDE	0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_PIPE_LOCKED	(1<<25)
+#define PIPEACONF_PALETTE	0
+#define PIPEACONF_GAMMA		(1<<24)
+#define PIPECONF_FORCE_BORDER	(1<<25)
+#define PIPECONF_PROGRESSIVE	(0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+
+#define DSPARB	  0x70030
+#define DSPARB_CSTART_MASK	(0x7f << 7)
+#define DSPARB_CSTART_SHIFT	7
+#define DSPARB_BSTART_MASK	(0x7f)
+#define DSPARB_BSTART_SHIFT	0
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE	(1<<31)
+#define PIPEBCONF_DISABLE	0
+#define PIPEBCONF_DOUBLE_WIDE	(1<<30)
+#define PIPEBCONF_GAMMA		(1<<24)
+#define PIPEBCONF_PALETTE	0
+
+#define PIPEBGCMAXRED		0x71010
+#define PIPEBGCMAXGREEN		0x71014
+#define PIPEBGCMAXBLUE		0x71018
+#define PIPEBSTAT		0x71024
+#define PIPEBFRAMEHIGH		0x71040
+#define PIPEBFRAMEPIXEL		0x71044
+
+#define DSPACNTR		0x70180
+#define DSPBCNTR		0x71180
+#define DISPLAY_PLANE_ENABLE			(1<<31)
+#define DISPLAY_PLANE_DISABLE			0
+#define DISPPLANE_GAMMA_ENABLE			(1<<30)
+#define DISPPLANE_GAMMA_DISABLE			0
+#define DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define DISPPLANE_8BPP				(0x2<<26)
+#define DISPPLANE_15_16BPP			(0x4<<26)
+#define DISPPLANE_16BPP				(0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
+#define DISPPLANE_32BPP				(0x7<<26)
+#define DISPPLANE_STEREO_ENABLE			(1<<25)
+#define DISPPLANE_STEREO_DISABLE		0
+#define DISPPLANE_SEL_PIPE_MASK			(1<<24)
+#define DISPPLANE_SEL_PIPE_A			0
+#define DISPPLANE_SEL_PIPE_B			(1<<24)
+#define DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE		0
+#define DISPPLANE_LINE_DOUBLE			(1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE		0
+#define DISPPLANE_STEREO_POLARITY_FIRST		0
+#define DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
+
+#define DSPABASE		0x70184
+#define DSPASTRIDE		0x70188
+
+#define DSPBBASE		0x71184
+#define DSPBADDR		DSPBBASE
+#define DSPBSTRIDE		0x71188
+
+#define DSPAKEYVAL		0x70194
+#define DSPAKEYMASK		0x70198
+
+#define DSPAPOS			0x7018C /* reserved */
+#define DSPASIZE		0x70190
+#define DSPBPOS			0x7118C
+#define DSPBSIZE		0x71190
+
+#define DSPASURF		0x7019C
+#define DSPATILEOFF		0x701A4
+
+#define DSPBSURF		0x7119C
+#define DSPBTILEOFF		0x711A4
+
+#define VGACNTRL		0x71400
+# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_2X_MODE				(1 << 30)
+# define VGA_PIPE_B_SELECT			(1 << 29)
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+
+#define SWF0			0x71410
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF10			0x70410
+
+#define SWF30			0x72414
+
+/*
+ * Overlay registers.  These are overlay registers accessed via MMIO.
+ * Those loaded via the overlay register page are defined in i830_video.c.
+ */
+#define OVADD			0x30000
+
+#define DOVSTA			0x30008
+#define OC_BUF			(0x3<<20)
+
+#define OGAMC5			0x30010
+#define OGAMC4			0x30014
+#define OGAMC3			0x30018
+#define OGAMC2			0x3001c
+#define OGAMC1			0x30020
+#define OGAMC0			0x30024
+/*
+ * Palette registers
+ */
+#define PALETTE_A		0x0a000
+#define PALETTE_B		0x0a800
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+
+#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
+		        (dev)->pci_device == 0x27AE)
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+		       (dev)->pci_device == 0x2982 || \
+		       (dev)->pci_device == 0x2992 || \
+		       (dev)->pci_device == 0x29A2 || \
+		       (dev)->pci_device == 0x2A02 || \
+		       (dev)->pci_device == 0x2A12 || \
+		       (dev)->pci_device == 0x2A42 || \
+		       (dev)->pci_device == 0x2E02 || \
+		       (dev)->pci_device == 0x2E12 || \
+		       (dev)->pci_device == 0x2E22)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
+
+#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
+		     (dev)->pci_device == 0x2E12 || \
+		     (dev)->pci_device == 0x2E22)
+
+#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
+			(dev)->pci_device == 0x29B2 ||	\
+			(dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+			IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
+
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev))
+
+#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
new file mode 100644
index 000000000000..1fe68a251b75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -0,0 +1,222 @@
+/**
+ * \file i915_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the i915 DRM.
+ *
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Alan Hourihane 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+
+typedef struct _drm_i915_batchbuffer32 {
+	int start;		/* agp offset */
+	int used;		/* nr bytes in use */
+	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
+	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+	u32 cliprects;		/* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
+
+static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_i915_batchbuffer32_t batchbuffer32;
+	drm_i915_batchbuffer_t __user *batchbuffer;
+
+	if (copy_from_user
+	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
+		return -EFAULT;
+
+	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
+	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
+	    || __put_user(batchbuffer32.start, &batchbuffer->start)
+	    || __put_user(batchbuffer32.used, &batchbuffer->used)
+	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
+	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
+	    || __put_user(batchbuffer32.num_cliprects,
+			  &batchbuffer->num_cliprects)
+	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
+			  &batchbuffer->cliprects))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_BATCHBUFFER,
+			 (unsigned long)batchbuffer);
+}
+
+typedef struct _drm_i915_cmdbuffer32 {
+	u32 buf;		/* pointer to userspace command buffer */
+	int sz;			/* nr bytes in buf */
+	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
+	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+	u32 cliprects;		/* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_i915_cmdbuffer32_t cmdbuffer32;
+	drm_i915_cmdbuffer_t __user *cmdbuffer;
+
+	if (copy_from_user
+	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
+		return -EFAULT;
+
+	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
+	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
+	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
+			  &cmdbuffer->buf)
+	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
+	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
+	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
+	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
+	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
+			  &cmdbuffer->cliprects))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
+}
+
+typedef struct drm_i915_irq_emit32 {
+	u32 irq_seq;
+} drm_i915_irq_emit32_t;
+
+static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_i915_irq_emit32_t req32;
+	drm_i915_irq_emit_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
+			  &request->irq_seq))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
+}
+
+typedef struct drm_i915_getparam32 {
+	int param;
+	u32 value;
+} drm_i915_getparam32_t;
+
+static int compat_i915_getparam(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_i915_getparam32_t req32;
+	drm_i915_getparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
+}
+
+typedef struct drm_i915_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or agp */
+} drm_i915_mem_alloc32_t;
+
+static int compat_i915_alloc(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_i915_mem_alloc32_t req32;
+	drm_i915_mem_alloc_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.region, &request->region)
+	    || __put_user(req32.alignment, &request->alignment)
+	    || __put_user(req32.size, &request->size)
+	    || __put_user((void __user *)(unsigned long)req32.region_offset,
+			  &request->region_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_ALLOC, (unsigned long)request);
+}
+
+drm_ioctl_compat_t *i915_compat_ioctls[] = {
+	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
+	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
+	[DRM_I915_GETPARAM] = compat_i915_getparam,
+	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
+	[DRM_I915_ALLOC] = compat_i915_alloc
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	lock_kernel();		/* XXX for now */
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
new file mode 100644
index 000000000000..df036118b8b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -0,0 +1,623 @@
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+
+#define MAX_NOPID ((u32)~0)
+
+/**
+ * Emit blits for scheduled buffer swaps.
+ *
+ * This function will be called with the HW lock held.
+ */
+static void i915_vblank_tasklet(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+	struct list_head *list, *tmp, hits, *hit;
+	int nhits, nrects, slice[2], upper[2], lower[2], i;
+	unsigned counter[2] = { atomic_read(&dev->vbl_received),
+				atomic_read(&dev->vbl_received2) };
+	struct drm_drawable_info *drw;
+	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 cpp = dev_priv->cpp;
+	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB)
+			     : XY_SRC_COPY_BLT_CMD;
+	u32 src_pitch = sarea_priv->pitch * cpp;
+	u32 dst_pitch = sarea_priv->pitch * cpp;
+	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
+	RING_LOCALS;
+
+	if (IS_I965G(dev) && sarea_priv->front_tiled) {
+		cmd |= XY_SRC_COPY_BLT_DST_TILED;
+		dst_pitch >>= 2;
+	}
+	if (IS_I965G(dev) && sarea_priv->back_tiled) {
+		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+		src_pitch >>= 2;
+	}
+
+	DRM_DEBUG("\n");
+
+	INIT_LIST_HEAD(&hits);
+
+	nhits = nrects = 0;
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	/* Find buffer swaps scheduled for this vertical blank */
+	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
+		drm_i915_vbl_swap_t *vbl_swap =
+			list_entry(list, drm_i915_vbl_swap_t, head);
+
+		if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
+			continue;
+
+		list_del(list);
+		dev_priv->swaps_pending--;
+
+		spin_unlock(&dev_priv->swaps_lock);
+		spin_lock(&dev->drw_lock);
+
+		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
+
+		if (!drw) {
+			spin_unlock(&dev->drw_lock);
+			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+			spin_lock(&dev_priv->swaps_lock);
+			continue;
+		}
+
+		list_for_each(hit, &hits) {
+			drm_i915_vbl_swap_t *swap_cmp =
+				list_entry(hit, drm_i915_vbl_swap_t, head);
+			struct drm_drawable_info *drw_cmp =
+				drm_get_drawable_info(dev, swap_cmp->drw_id);
+
+			if (drw_cmp &&
+			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
+				list_add_tail(list, hit);
+				break;
+			}
+		}
+
+		spin_unlock(&dev->drw_lock);
+
+		/* List of hits was empty, or we reached the end of it */
+		if (hit == &hits)
+			list_add_tail(list, hits.prev);
+
+		nhits++;
+
+		spin_lock(&dev_priv->swaps_lock);
+	}
+
+	if (nhits == 0) {
+		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+		return;
+	}
+
+	spin_unlock(&dev_priv->swaps_lock);
+
+	i915_kernel_lost_context(dev);
+
+	if (IS_I965G(dev)) {
+		BEGIN_LP_RING(4);
+
+		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+		OUT_RING(0);
+		OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(6);
+
+		OUT_RING(GFX_OP_DRAWRECT_INFO);
+		OUT_RING(0);
+		OUT_RING(0);
+		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
+		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
+		OUT_RING(0);
+
+		ADVANCE_LP_RING();
+	}
+
+	sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
+
+	upper[0] = upper[1] = 0;
+	slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
+	slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
+	lower[0] = sarea_priv->pipeA_y + slice[0];
+	lower[1] = sarea_priv->pipeB_y + slice[1];
+
+	spin_lock(&dev->drw_lock);
+
+	/* Emit blits for buffer swaps, partitioning both outputs into as many
+	 * slices as there are buffer swaps scheduled in order to avoid tearing
+	 * (based on the assumption that a single buffer swap would always
+	 * complete before scanout starts).
+	 */
+	for (i = 0; i++ < nhits;
+	     upper[0] = lower[0], lower[0] += slice[0],
+	     upper[1] = lower[1], lower[1] += slice[1]) {
+		if (i == nhits)
+			lower[0] = lower[1] = sarea_priv->height;
+
+		list_for_each(hit, &hits) {
+			drm_i915_vbl_swap_t *swap_hit =
+				list_entry(hit, drm_i915_vbl_swap_t, head);
+			struct drm_clip_rect *rect;
+			int num_rects, pipe;
+			unsigned short top, bottom;
+
+			drw = drm_get_drawable_info(dev, swap_hit->drw_id);
+
+			if (!drw)
+				continue;
+
+			rect = drw->rects;
+			pipe = swap_hit->pipe;
+			top = upper[pipe];
+			bottom = lower[pipe];
+
+			for (num_rects = drw->num_rects; num_rects--; rect++) {
+				int y1 = max(rect->y1, top);
+				int y2 = min(rect->y2, bottom);
+
+				if (y1 >= y2)
+					continue;
+
+				BEGIN_LP_RING(8);
+
+				OUT_RING(cmd);
+				OUT_RING(ropcpp | dst_pitch);
+				OUT_RING((y1 << 16) | rect->x1);
+				OUT_RING((y2 << 16) | rect->x2);
+				OUT_RING(sarea_priv->front_offset);
+				OUT_RING((y1 << 16) | rect->x1);
+				OUT_RING(src_pitch);
+				OUT_RING(sarea_priv->back_offset);
+
+				ADVANCE_LP_RING();
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	list_for_each_safe(hit, tmp, &hits) {
+		drm_i915_vbl_swap_t *swap_hit =
+			list_entry(hit, drm_i915_vbl_swap_t, head);
+
+		list_del(hit);
+
+		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
+	}
+}
+
+irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 temp;
+	u32 pipea_stats, pipeb_stats;
+
+	pipea_stats = I915_READ(I915REG_PIPEASTAT);
+	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+
+	temp = I915_READ16(I915REG_INT_IDENTITY_R);
+
+	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
+
+	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+
+	if (temp == 0)
+		return IRQ_NONE;
+
+	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+	(void) I915_READ16(I915REG_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
+
+	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+	if (temp & USER_INT_FLAG)
+		DRM_WAKEUP(&dev_priv->irq_queue);
+
+	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
+		int vblank_pipe = dev_priv->vblank_pipe;
+
+		if ((vblank_pipe &
+		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+			if (temp & VSYNC_PIPEA_FLAG)
+				atomic_inc(&dev->vbl_received);
+			if (temp & VSYNC_PIPEB_FLAG)
+				atomic_inc(&dev->vbl_received2);
+		} else if (((temp & VSYNC_PIPEA_FLAG) &&
+			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+			   ((temp & VSYNC_PIPEB_FLAG) &&
+			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+			atomic_inc(&dev->vbl_received);
+
+		DRM_WAKEUP(&dev->vbl_queue);
+		drm_vbl_send_signals(dev);
+
+		if (dev_priv->swaps_pending > 0)
+			drm_locked_tasklet(dev, i915_vblank_tasklet);
+		I915_WRITE(I915REG_PIPEASTAT,
+			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+			I915_VBLANK_CLEAR);
+		I915_WRITE(I915REG_PIPEBSTAT,
+			pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+			I915_VBLANK_CLEAR);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int i915_emit_irq(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	i915_kernel_lost_context(dev);
+
+	DRM_DEBUG("\n");
+
+	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+
+	if (dev_priv->counter > 0x7FFFFFFFUL)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(20);
+	OUT_RING(dev_priv->counter);
+	OUT_RING(0);
+	OUT_RING(0);
+	OUT_RING(GFX_OP_USER_INTERRUPT);
+	ADVANCE_LP_RING();
+
+	return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int ret = 0;
+
+	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+		  READ_BREADCRUMB(dev_priv));
+
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+		return 0;
+
+	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+
+	if (ret == -EBUSY) {
+		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+	}
+
+	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+	return ret;
+}
+
+static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
+				      atomic_t *counter)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int cur_vblank;
+	int ret = 0;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+			- *sequence) <= (1<<23)));
+
+	*sequence = cur_vblank;
+
+	return ret;
+}
+
+
+int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
+}
+
+int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int i915_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_emit_t *emit = data;
+	int result;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	result = i915_emit_irq(dev);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int i915_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_wait_t *irqwait = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static void i915_enable_interrupt (struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 flag;
+
+	flag = 0;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+		flag |= VSYNC_PIPEA_FLAG;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+		flag |= VSYNC_PIPEB_FLAG;
+
+	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
+}
+
+/* Set the vblank monitor pipe
+ */
+int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_pipe_t *pipe = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
+		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
+		return -EINVAL;
+	}
+
+	dev_priv->vblank_pipe = pipe->pipe;
+
+	i915_enable_interrupt (dev);
+
+	return 0;
+}
+
+int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_pipe_t *pipe = data;
+	u16 flag;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	flag = I915_READ(I915REG_INT_ENABLE_R);
+	pipe->pipe = 0;
+	if (flag & VSYNC_PIPEA_FLAG)
+		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
+	if (flag & VSYNC_PIPEB_FLAG)
+		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+
+	return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+int i915_vblank_swap(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_swap_t *swap = data;
+	drm_i915_vbl_swap_t *vbl_swap;
+	unsigned int pipe, seqtype, curseq;
+	unsigned long irqflags;
+	struct list_head *list;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev_priv->sarea_priv->rotation) {
+		DRM_DEBUG("Rotation not supported\n");
+		return -EINVAL;
+	}
+
+	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
+			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
+		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
+		return -EINVAL;
+	}
+
+	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+
+	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
+
+	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
+		DRM_ERROR("Invalid pipe %d\n", pipe);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+	if (!drm_get_drawable_info(dev, swap->drawable)) {
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
+		return -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+
+	if (seqtype == _DRM_VBLANK_RELATIVE)
+		swap->sequence += curseq;
+
+	if ((curseq - swap->sequence) <= (1<<23)) {
+		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
+			swap->sequence = curseq + 1;
+		} else {
+			DRM_DEBUG("Missed target sequence\n");
+			return -EINVAL;
+		}
+	}
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_for_each(list, &dev_priv->vbl_swaps.head) {
+		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+
+		if (vbl_swap->drw_id == swap->drawable &&
+		    vbl_swap->pipe == pipe &&
+		    vbl_swap->sequence == swap->sequence) {
+			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+			DRM_DEBUG("Already scheduled\n");
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+
+	if (dev_priv->swaps_pending >= 100) {
+		DRM_DEBUG("Too many swaps queued\n");
+		return -EBUSY;
+	}
+
+	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+	if (!vbl_swap) {
+		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		return -ENOMEM;
+	}
+
+	DRM_DEBUG("\n");
+
+	vbl_swap->drw_id = swap->drawable;
+	vbl_swap->pipe = pipe;
+	vbl_swap->sequence = swap->sequence;
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending++;
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+
+	return 0;
+}
+
+/* drm_dma.h hooks
+*/
+void i915_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
+	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
+	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+}
+
+void i915_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock_init(&dev_priv->swaps_lock);
+	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending = 0;
+
+	if (!dev_priv->vblank_pipe)
+		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+	i915_enable_interrupt(dev);
+	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+}
+
+void i915_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 temp;
+
+	if (!dev_priv)
+		return;
+
+	I915_WRITE16(I915REG_HWSTAM, 0xffff);
+	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
+	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+
+	temp = I915_READ16(I915REG_INT_IDENTITY_R);
+	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+}
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
new file mode 100644
index 000000000000..6126a60dc9cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -0,0 +1,386 @@
+/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* This memory manager is integrated into the global/local lru
+ * mechanisms used by the clients.  Specifically, it operates by
+ * setting the 'in_use' fields of the global LRU to indicate whether
+ * this region is privately allocated to a client.
+ *
+ * This does require the client to actually respect that field.
+ *
+ * Currently no effort is made to allocate 'private' memory in any
+ * clever way - the LRU information isn't used to determine which
+ * block to allocate, and the ring is drained prior to allocations --
+ * in other words allocation is expensive.
+ */
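+
+/* Illustrative sketch (not one of this file's entry points): carve a 64 KiB,
+ * 4 KiB-aligned block out of the AGP heap for a client and flag its LRU
+ * regions as privately owned; dev, dev_priv and file_priv are assumed to be
+ * in scope:
+ *
+ *	struct mem_block *b = alloc_block(dev_priv->agp_heap, 64 * 1024,
+ *					  12, file_priv);
+ *	if (b)
+ *		mark_block(dev, b, 1);
+ */
+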
+static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_tex_region *list;
+	unsigned shift, nr;
+	unsigned start;
+	unsigned end;
+	unsigned i;
+	int age;
+
+	shift = dev_priv->tex_lru_log_granularity;
+	nr = I915_NR_TEX_REGIONS;
+
+	start = p->start >> shift;
+	end = (p->start + p->size - 1) >> shift;
+
+	age = ++sarea_priv->texAge;
+	list = sarea_priv->texList;
+
+	/* Mark the regions with the new flag and update their age.  Move
+	 * them to head of list to preserve LRU semantics.
+	 */
+	for (i = start; i <= end; i++) {
+		list[i].in_use = in_use;
+		list[i].age = age;
+
+		/* remove_from_list(i)
+		 */
+		list[(unsigned)list[i].next].prev = list[i].prev;
+		list[(unsigned)list[i].prev].next = list[i].next;
+
+		/* insert_at_head(list, i)
+		 */
+		list[i].prev = nr;
+		list[i].next = list[nr].next;
+		list[(unsigned)list[nr].next].prev = i;
+		list[nr].next = i;
+	}
+}
+
+/* Very simple allocator for agp memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+				     struct drm_file *file_priv)
+{
+	/* Maybe cut off the start of an existing block */
+	if (start > p->start) {
+		struct mem_block *newblock =
+		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+		if (!newblock)
+			goto out;
+		newblock->start = start;
+		newblock->size = p->size - (start - p->start);
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size -= newblock->size;
+		p = newblock;
+	}
+
+	/* Maybe cut off the end of an existing block */
+	if (size < p->size) {
+		struct mem_block *newblock =
+		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+		if (!newblock)
+			goto out;
+		newblock->start = start + size;
+		newblock->size = p->size - size;
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size = size;
+	}
+
+      out:
+	/* Our block is in the middle */
+	p->file_priv = file_priv;
+	return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+				     int align2, struct drm_file *file_priv)
+{
+	struct mem_block *p;
+	int mask = (1 << align2) - 1;
+
+	for (p = heap->next; p != heap; p = p->next) {
+		int start = (p->start + mask) & ~mask;
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
+			return split_block(p, start, size, file_priv);
+	}
+
+	return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+	struct mem_block *p;
+
+	for (p = heap->next; p != heap; p = p->next)
+		if (p->start == start)
+			return p;
+
+	return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+	p->file_priv = NULL;
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	if (p->next->file_priv == NULL) {
+		struct mem_block *q = p->next;
+		p->size += q->size;
+		p->next = q->next;
+		p->next->prev = p;
+		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+	}
+
+	if (p->prev->file_priv == NULL) {
+		struct mem_block *q = p->prev;
+		q->size += p->size;
+		q->next = p->next;
+		q->next->prev = q;
+		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
+	}
+}
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
+
+	if (!blocks)
+		return -ENOMEM;
+
+	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
+	if (!*heap) {
+		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
+		return -ENOMEM;
+	}
+
+	blocks->start = start;
+	blocks->size = size;
+	blocks->file_priv = NULL;
+	blocks->next = blocks->prev = *heap;
+
+	memset(*heap, 0, sizeof(**heap));
+	(*heap)->file_priv = (struct drm_file *) - 1;
+	(*heap)->next = (*heap)->prev = blocks;
+	return 0;
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
+		      struct mem_block *heap)
+{
+	struct mem_block *p;
+
+	if (!heap || !heap->next)
+		return;
+
+	for (p = heap->next; p != heap; p = p->next) {
+		if (p->file_priv == file_priv) {
+			p->file_priv = NULL;
+			mark_block(dev, p, 0);
+		}
+	}
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	for (p = heap->next; p != heap; p = p->next) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
+			struct mem_block *q = p->next;
+			p->size += q->size;
+			p->next = q->next;
+			p->next->prev = p;
+			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+		}
+	}
+}
+
+/* Shutdown.
+ */
+void i915_mem_takedown(struct mem_block **heap)
+{
+	struct mem_block *p;
+
+	if (!*heap)
+		return;
+
+	for (p = (*heap)->next; p != *heap;) {
+		struct mem_block *q = p;
+		p = p->next;
+		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+	}
+
+	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
+	*heap = NULL;
+}
+
+static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
+{
+	switch (region) {
+	case I915_MEM_REGION_AGP:
+		return &dev_priv->agp_heap;
+	default:
+		return NULL;
+	}
+}
+
+/* IOCTL HANDLERS */
+
+int i915_mem_alloc(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_alloc_t *alloc = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, alloc->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	/* Make things easier on ourselves: all allocations at least
+	 * 4k aligned.
+	 */
+	if (alloc->alignment < 12)
+		alloc->alignment = 12;
+
+	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+	if (!block)
+		return -ENOMEM;
+
+	mark_block(dev, block, 1);
+
+	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+			     sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int i915_mem_free(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_free_t *memfree = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, memfree->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	block = find_block(*heap, memfree->region_offset);
+	if (!block)
+		return -EFAULT;
+
+	if (block->file_priv != file_priv)
+		return -EPERM;
+
+	mark_block(dev, block, 0);
+	free_block(block);
+	return 0;
+}
+
+int i915_mem_init_heap(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_init_heap_t *initheap = data;
+	struct mem_block **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, initheap->region);
+	if (!heap)
+		return -EFAULT;
+
+	if (*heap) {
+		DRM_ERROR("heap already initialized?");
+		return -EFAULT;
+	}
+
+	return init_heap(heap, initheap->start, initheap->size);
+}
+
+int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_destroy_heap_t *destroyheap = data;
+	struct mem_block **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, destroyheap->region);
+	if (!heap) {
+		DRM_ERROR("get_heap failed\n");
+		return -EFAULT;
+	}
+
+	if (!*heap) {
+		DRM_ERROR("heap not initialized?\n");
+		return -EFAULT;
+	}
+
+	i915_mem_takedown(heap);
+	return 0;
+}